import base64

import streamlit as st
import tensorflow as tf
from huggingface_hub import from_pretrained_keras

# Local helpers (star-imported): H, W, focal, NUM_SAMPLES, pose_spherical,
# get_rays, render_flat_rays and render_rgb_depth come from these modules.
from config import *
from transformations import *
from rendering import *

# Set the random seed to obtain reproducible results.
tf.random.set_seed(42)

def show_rendered_image(r, theta, phi):
    # Get the camera-to-world matrix for the given spherical coordinates.
    c2w = pose_spherical(theta, phi, r)

    # Cast one ray per pixel and sample points along each ray.
    ray_oris, ray_dirs = get_rays(H, W, focal, c2w)
    rays_flat, t_vals = render_flat_rays(
        ray_oris, ray_dirs, near=2.0, far=6.0, num_samples=NUM_SAMPLES, rand=False
    )

    # Query the trained NeRF model and composite the samples into an RGB
    # image and a depth map.
    rgb, depth = render_rgb_depth(
        nerf_loaded, rays_flat[None, ...], t_vals[None, ...], rand=False, train=False
    )

    return rgb[0], depth[0]


# The Streamlit page layout starts here.

st.title('3D Volumetric Rendering with NeRF: A Concrete Example on the Ficus Dataset')

# Embed the training GIF as a base64 data URL so it can be sized via HTML.
with open('./training(3).gif', 'rb') as f:
    data_url = base64.b64encode(f.read()).decode('utf-8')

st.markdown("[NeRF](https://arxiv.org/abs/2003.08934) proposes an ingenious way to synthesize novel views of a scene by modelling the volumetric scene function through a neural network. The network learns to model the volumetric scene, thus generating novel views (images) of the 3D scene that the model was not shown at training time.")
# st.markdown("![](https://github.com/alesteba/training_NeRF/blob/e89da9448b3993117c78532c14c7142970f0d8df/training(3).gif)")

st.markdown(
    f'<img src="data:image/gif;base64,{data_url}" alt="NeRF training progress" width=100%>',
    unsafe_allow_html=True,
)

st.markdown("## Interactive Demo")

# Download the trained NeRF model from the Hugging Face Hub.
nerf_loaded = from_pretrained_keras("Alesteba/NeRF_ficus")
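# Note: Streamlit reruns this whole script on every widget interaction, so the
# model would be reloaded each time. A minimal sketch of a cached loader,
# assuming Streamlit >= 1.18 (st.cache_resource):
#
# @st.cache_resource
# def load_nerf():
#     return from_pretrained_keras("Alesteba/NeRF_ficus")
#
# nerf_loaded = load_nerf()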


# Fix the camera distance and let the user pick the viewing angles (degrees).
r = 4.0
theta = st.slider("theta", min_value=0.0, max_value=360.0, label_visibility="hidden")
phi = st.slider("phi", min_value=0.0, max_value=360.0, label_visibility="hidden")

color, depth = show_rendered_image(r, theta, phi)

# Show the rendered RGB image and its depth map side by side.
col1, col2 = st.columns(2)

with col1:
    color = tf.keras.utils.array_to_img(color)
    st.image(color, caption="Color Image", clamp=True, width=300)

with col2:
    depth = tf.keras.utils.array_to_img(depth[..., None])
    st.image(depth, caption="Depth Map", clamp=True, width=300)
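
# To run the app locally (assuming the repo's config.py, transformations.py
# and rendering.py sit next to this file):
#
#   pip install streamlit tensorflow huggingface_hub
#   streamlit run app.py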