#!/usr/bin/env python

from __future__ import annotations

import argparse
import functools
import os
import pickle
import sys
import subprocess

import gradio as gr
import numpy as np
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from transformers import pipeline

sys.path.append('.')
sys.path.append('./Time_TravelRephotography')
from argparse import Namespace
from projector import (
    ProjectorArguments,
    main as run_projection,  # aliased: this script defines its own main() below, which would otherwise shadow it
)
sys.path.insert(0, 'StyleGAN-Human')

spectral_sensitivity = 'b'
TITLE = 'Time-TravelRephotography'
DESCRIPTION = '''This is an unofficial demo for https://github.com/Time-Travel-Rephotography.
'''
ARTICLE = '<center><img src="https://visitor-badge.glitch.me/badge?page_id=hysts.stylegan-human" alt="visitor badge"/></center>'

# Read the access token from the environment instead of hard-coding a secret in the source
# (the environment variable name HF_TOKEN is an assumption; set it to your own token).
TOKEN = os.environ.get('HF_TOKEN')


pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es")

def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, default='cpu')
    parser.add_argument('--theme', type=str)
    parser.add_argument('--live', action='store_true')
    parser.add_argument('--share', action='store_true')
    parser.add_argument('--port', type=int)
    parser.add_argument('--disable-queue',
                        dest='enable_queue',
                        action='store_false')
    parser.add_argument('--allow-flagging', type=str, default='never')
    return parser.parse_args()
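
# Sketch (assumption, not wired up in the original): parse_args() is defined but
# never called. If used, its values could be forwarded to Gradio roughly like
# this, assuming an older launch() signature that still accepts these keywords:
#
#     args = parse_args()
#     iface.launch(share=args.share,
#                  server_port=args.port,
#                  enable_queue=args.enable_queue)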

def load_model(file_name: str, repo_id: str, device: torch.device) -> nn.Module:
    """Download a generator checkpoint from the Hub, load it, and warm it up with a dummy forward pass."""
    path = hf_hub_download(repo_id, file_name, use_auth_token=TOKEN)
    with open(path, 'rb') as f:
        model = torch.load(f)
    model.eval()
    model.to(device)
    with torch.inference_mode():
        z = torch.zeros((1, model.z_dim)).to(device)
        label = torch.zeros([1, model.c_dim], device=device)
        model(z, label, force_fp32=True)
    return model
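
# Example usage (sketch), mirroring the commented-out call in main() below; the
# repo id and checkpoint name come from that comment and may need adjusting:
#
#     device = torch.device('cpu')
#     generator = load_model('stylegan2-ffhq-config-f',
#                            'feng2022/Time-TravelRephotography_stylegan2-ffhq-config-f',
#                            device)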
   
def predict(text):
    # Translate English input to Spanish with the Helsinki-NLP pipeline loaded above.
    return pipe(text)[0]["translation_text"]
 
def main():
    #torch.cuda.init()
    #if torch.cuda.is_initialized():
    #    ini = "True1"
    #else:
    #    ini = "False1"
    #result = subprocess.check_output(['nvidia-smi'])

    # The original referenced an undefined `input_path` here; this placeholder
    # (an assumed file name) keeps the script runnable. Point it at a real input photograph.
    input_path = 'input.jpg'
    args = ProjectorArguments().parse(
        args=[str(input_path)],
        namespace=Namespace(
            # spectral_sensitivity=spectral_sensitivity,
            encoder_ckpt=f"checkpoint/encoder/checkpoint_{spectral_sensitivity}.pt",
            # encoder_name=spectral_sensitivity,
            # gaussian=gaussian_radius,
            log_visual_freq=1000,
        ))
    #device = torch.device(args.device)
    #load_model("stylegan2-ffhq-config-f","feng2022/Time-TravelRephotography_stylegan2-ffhq-config-f",device)

    iface = gr.Interface(
        fn=predict,
        inputs='text',
        outputs='text',
        examples=['result'],
    )

    iface.launch()


if __name__ == '__main__':
    main()