import streamlit as st
import tensorflow as tf
import numpy as np

from huggingface_hub import from_pretrained_keras

from config import *
from transformations import *
from rendering import *
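# NOTE (assumption): the wildcard imports above are expected to provide H, W, focal and
# NUM_SAMPLES (from config), plus pose_spherical, get_rays, render_flat_rays and
# render_rgb_depth (from transformations / rendering), all of which are used below.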

# Setting random seed to obtain reproducible results.
tf.random.set_seed(42)

def show_rendered_image(r, theta, phi):
    """Render the scene from a camera placed at spherical coordinates (r, theta, phi)."""
    # Get the camera-to-world matrix for the requested pose.
    c2w = pose_spherical(theta, phi, r)

    # Build one ray per pixel and sample points along each ray between the near and far bounds.
    ray_oris, ray_dirs = get_rays(H, W, focal, c2w)
    rays_flat, t_vals = render_flat_rays(
        ray_oris, ray_dirs, near=2.0, far=6.0, num_samples=NUM_SAMPLES, rand=False
    )

    # Query the pre-trained NeRF model and composite the samples into an RGB image and a depth map.
    rgb, depth = render_rgb_depth(
        nerf_loaded, rays_flat[None, ...], t_vals[None, ...], rand=False, train=False
    )

    return rgb[0], depth[0]


# The Streamlit page content starts here.

st.title('3D volumetric rendering with NeRF')

st.markdown("Authors: [Aritra Roy Gosthipathy](https://twitter.com/ariG23498) and [Ritwik Raha](https://twitter.com/ritwik_raha)")
st.markdown("## Description")
st.markdown("[NeRF](https://arxiv.org/abs/2003.08934) proposes an ingenious way to synthesize novel views of a scene by modelling the volumetric scene function through a neural network.")
st.markdown("![Alt Text](https://drive.google.com/file/d/1oC32vJZzc1YiK1ggzO_Vg6EY3j5E2aR9/view?usp=share_link)")
st.markdown("## Interactive Demo")

# Download the pre-trained NeRF model ("Alesteba/NeRF_ficus") from the Hugging Face Hub.
nerf_loaded = from_pretrained_keras("Alesteba/NeRF_ficus")
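
# NOTE (assumption): on recent Streamlit releases the model load could be cached so the
# weights are not re-fetched on every slider-triggered rerun, e.g.:
#
#     @st.cache_resource
#     def load_nerf():
#         return from_pretrained_keras("Alesteba/NeRF_ficus")
#
#     nerf_loaded = load_nerf()
#
# The plain call above keeps the original behaviour.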

# Camera pose: the radius r and elevation phi are fixed; the azimuth theta is set by the slider.
r = 4.0
theta = st.slider("Enter a value for Θ:", min_value=0.0, max_value=360.0)
phi = -30.0
color, depth = show_rendered_image(r, theta, phi)
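
# NOTE (assumption): color is expected to be an (H, W, 3) float tensor and depth an (H, W)
# tensor, which is why depth gets a trailing channel axis before being converted to an image below.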

# Show the rendered color image and the depth map side by side.
col1, col2 = st.columns(2)

with col1:
    color = tf.keras.utils.array_to_img(color)
    st.image(color, caption="Color Image", clamp=True, width=300)

with col2:
    depth = tf.keras.utils.array_to_img(depth[..., None])
    st.image(depth, caption="Depth Map", clamp=True, width=300)