#!/usr/bin/env python

from __future__ import annotations

import random
import argparse
import functools
import os
import pickle
import sys
import subprocess

import gradio as gr
import numpy as np
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from transformers import pipeline

sys.path.append('.')
sys.path.append('./Time_TravelRephotography')
from utils import torch_helpers as th
from argparse import Namespace
from projector import (
    ProjectorArguments,
    main as projector_main,  # aliased so the local main() defined below does not shadow it
    create_generator,
    make_image,
)
sys.path.insert(0, 'StyleGAN-Human')

input_path = ''
spectral_sensitivity = 'b'
TITLE = 'Time-TravelRephotography'
DESCRIPTION = '''This is an unofficial demo for https://github.com/Time-Travel-Rephotography.
'''
ARTICLE = '<center><img src="https://visitor-badge.glitch.me/badge?page_id=Time-TravelRephotography" alt="visitor badge"/></center>'

# Prefer an HF_TOKEN environment variable; the hardcoded value is kept only as a fallback.
TOKEN = os.environ.get("HF_TOKEN", "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")


# English-to-Spanish translation pipeline used by predict() below.
pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es")

def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, default='cpu')
    parser.add_argument('--theme', type=str)
    parser.add_argument('--live', action='store_true')
    parser.add_argument('--share', action='store_true')
    parser.add_argument('--port', type=int)
    parser.add_argument('--disable-queue',
                        dest='enable_queue',
                        action='store_false')
    parser.add_argument('--allow-flagging', type=str, default='never')
    return parser.parse_args()
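
# Usage note (hedged): launching this script with flags such as `--device cpu --share`
# would populate the Namespace above, but main() below currently never calls parse_args(),
# so these options only matter for the commented-out Interface configuration further down.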

def load_model(file_name: str, path: str, device: torch.device) -> nn.Module:
    """Download a pickled generator checkpoint from the Hugging Face Hub repo `path`,
    load it, move it to `device`, and warm it up with a dummy forward pass."""
    ckpt_path = hf_hub_download(path, file_name, use_auth_token=TOKEN)
    with open(ckpt_path, 'rb') as f:
        model = torch.load(f)
    model.eval()
    model.to(device)
    with torch.inference_mode():
        # Dummy latent and label, just to trigger a first forward pass on the target device.
        z = torch.zeros((1, model.z_dim)).to(device)
        label = torch.zeros([1, model.c_dim], device=device)
        model(z, label, force_fp32=True)
    return model
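
# Usage sketch (hedged): based on the commented-out call in main() below, loading the face
# generator would look roughly like this; the repo id and file name are taken from that
# comment and are not verified here:
#
#   device = torch.device('cpu')
#   generator = load_model('stylegan2-ffhq-config-f',
#                          'feng2022/Time-TravelRephotography_stylegan2-ffhq-config-f',
#                          device)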
   
def predict(text: str) -> str:
    # Translate English input text to Spanish using the pipeline defined above.
    return pipe(text)[0]["translation_text"]
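
# Example (hedged): predict('Old photographs fade over time.') should return the Spanish
# translation produced by the Helsinki-NLP/opus-mt-en-es model, e.g. something like
# 'Las fotografías antiguas se desvanecen con el tiempo.' (exact wording depends on the model).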

def chat(message, history):
    """Tiny rule-based chatbot: match a few keywords and append the
    (message, response) pair to the running history."""
    history = history or []
    message = message.lower()
    if message.startswith("how many"):
        response = str(random.randint(1, 10))  # Chatbot components expect string responses
    elif message.startswith("how"):
        response = random.choice(["Great", "Good", "Okay", "Bad"])
    elif message.startswith("where"):
        response = random.choice(["Here", "There", "Somewhere"])
    else:
        response = "I don't know"
    history.append((message, response))
    return history, history
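
# Example (hedged): chat('how are you?', []) returns the updated history twice, e.g.
# ([('how are you?', 'Good')], [('how are you?', 'Good')]): once for the Chatbot display
# and once for the "state" component that carries the conversation between calls.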


def main():
    #torch.cuda.init()
    #if torch.cuda.is_initialized():
    #    ini = "True1"
    #else:
    #    ini = "False1"
    #result = subprocess.check_output(['nvidia-smi'])
    #load_model("stylegan2-ffhq-config-f","feng2022/Time-TravelRephotography_stylegan2-ffhq-config-f",device)
    """args = ProjectorArguments().parse(
        args=[str(input_path)],
        namespace=Namespace(
            # spectral_sensitivity=spectral_sensitivity,
            encoder_ckpt=f"checkpoint/encoder/checkpoint_{spectral_sensitivity}.pt",
            # encoder_name=spectral_sensitivity,
            # gaussian=gaussian_radius,
            log_visual_freq=1000,
            input='text',
        ))
    device = th.device()
    generator = create_generator("stylegan2-ffhq-config-f.pt","feng2022/Time-TravelRephotography_stylegan2-ffhq-config-f",args, device)
    latent = torch.randn((1, 512), device=device) 
    img_out, _, _ = generator([latent])
    imgs_arr = make_image(img_out)"""
    # Earlier Interface wiring for the translation demo, kept for reference:
    # iface = gr.Interface(
    #     fn=predict,
    #     inputs='text',
    #     outputs='text',
    #     examples=['result'],
    #     # gr.outputs.Image(type='numpy', label='Output'),
    #     title=TITLE,
    #     description=DESCRIPTION,
    #     article=ARTICLE,
    #     theme=args.theme,
    #     allow_flagging=args.allow_flagging,
    #     live=args.live,
    # )
    #
    # iface.launch(
    #     enable_queue=args.enable_queue,
    #     server_port=args.port,
    #     share=args.share,
    # )

    # Note: .style() is the Gradio 3.x styling API; it was removed in Gradio 4.
    chatbot = gr.Chatbot().style(color_map=("green", "pink"))
    demo = gr.Interface(
        chat,
        ["text", "state"],
        [chatbot, "state"],
        allow_flagging="never",
    )
    demo.launch()
    
if __name__ == '__main__':
    main()