from .imagenhub_models import load_imagenhub_model
# from .playground_api import load_playground_model
from .fal_api_models import load_fal_model
# from .videogenhub_models import load_videogenhub_model
from .huggingface_models import load_huggingface_model
from .replicate_api_models import load_replicate_model
from .openai_api_models import load_openai_model
from .other_api_models import load_other_model


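# Model names follow the "{source}_{name}_{type}" convention parsed by load_pipeline().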
IMAGE_GENERATION_MODELS = [ 
                            'replicate_SDXL_text2image',
                            'replicate_SD-v3.0_text2image',
                            'replicate_SD-v2.1_text2image',
                            'replicate_SD-v1.5_text2image',
                            'replicate_SDXL-Lightning_text2image',
                            'replicate_Kandinsky-v2.0_text2image',
                            'replicate_Kandinsky-v2.2_text2image',
                            'replicate_Proteus-v0.2_text2image',
                            'replicate_Playground-v2.0_text2image',
                            'replicate_Playground-v2.5_text2image',
                            'replicate_Dreamshaper-xl-turbo_text2image',
                            'replicate_SDXL-Deepcache_text2image',
                            'replicate_Openjourney-v4_text2image',
                            'replicate_LCM-v1.5_text2image',
                            'replicate_Realvisxl-v3.0_text2image',
                            'replicate_Realvisxl-v2.0_text2image',
                            'replicate_Pixart-Sigma_text2image',
                            'replicate_SSD-1b_text2image',
                            'replicate_Open-Dalle-v1.1_text2image',
                            'replicate_Deepfloyd-IF_text2image',
                            'huggingface_SD-turbo_text2image',
                            'huggingface_SDXL-turbo_text2image',
                            'huggingface_Stable-cascade_text2image',
                            'openai_Dalle-2_text2image',
                            'openai_Dalle-3_text2image',
                            # 'other_Midjourney-v6.0_text2image',
                            # 'other_Midjourney-v5.0_text2image',
                            ]


IMAGE_EDITION_MODELS = ['imagenhub_CycleDiffusion_edition', 'imagenhub_Pix2PixZero_edition', 'imagenhub_Prompt2prompt_edition',
                        'imagenhub_SDEdit_edition', 'imagenhub_InstructPix2Pix_edition', 
                        'imagenhub_MagicBrush_edition', 'imagenhub_PNP_edition', 
                        'imagenhub_InfEdit_edition', 'imagenhub_CosXLEdit_edition']
VIDEO_GENERATION_MODELS = ['fal_AnimateDiff_text2video',
                           'fal_AnimateDiffTurbo_text2video',
                           'videogenhub_LaVie_generation', 'videogenhub_VideoCrafter2_generation',
                           'videogenhub_ModelScope_generation', 'videogenhub_OpenSora_generation']


def load_pipeline(model_name):
    """
    Load a model pipeline based on the model name
    Args:
        model_name (str): The name of the model to load, should be of the form {source}_{name}_{type}
        the source can be either imagenhub or playground
        the name is the name of the model used to load the model
        the type is the type of the model, either generation or edition
    """
    model_source, model_name, model_type = model_name.split("_")
    # if model_source == "imagenhub":
    #     pipe = load_imagenhub_model(model_name, model_type)
    # elif model_source == "fal":
    #     pipe = load_fal_model(model_name, model_type)
    # elif model_source == "videogenhub":
    #     pipe = load_videogenhub_model(model_name)
    if model_source == "replicate":
        pipe = load_replicate_model(model_name, model_type)
    elif model_source == "huggingface":
        pipe = load_huggingface_model(model_name, model_type)
    elif model_source == "openai":
        pipe = load_openai_model(model_name, model_type)
    elif model_source == "other":
        pipe = load_other_model(model_name, model_type)
    else:
        raise ValueError(f"Model source {model_source} not supported")
    return pipe
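

# Illustrative usage (a minimal sketch): this assumes the API credentials for the
# chosen source are configured in the environment, and the returned pipeline's call
# signature is defined by the source-specific loader, so the keyword below is an
# assumption rather than a documented interface.
#
#   pipe = load_pipeline("replicate_SDXL_text2image")
#   result = pipe(prompt="an astronaut riding a horse")  # hypothetical call signature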