Alesteba committed
Commit ce8d5ec · 1 Parent(s): 014a7c9

Update app.py

Files changed (1): app.py +3 -23
app.py CHANGED

@@ -2,22 +2,13 @@ import streamlit as st
 import tensorflow as tf
 import numpy as np
 
+from config import *
 from transformations import *
 from rendering import *
 
 # Setting random seed to obtain reproducible results.
 tf.random.set_seed(42)
 
-# Initialize global variables.
-AUTO = tf.data.AUTOTUNE
-BATCH_SIZE = 1
-NUM_SAMPLES = 32
-POS_ENCODE_DIMS = 16
-EPOCHS = 30
-H = 25
-W = 25
-focal = 0.6911112070083618
-
 def show_rendered_image(r,theta,phi):
     # Get the camera to world matrix.
     c2w = pose_spherical(theta, phi, r)
@@ -35,23 +26,18 @@ def show_rendered_image(r,theta,phi):
 
 # app.py text matter starts here
 st.title('NeRF:3D volumetric rendering with NeRF')
+
 st.markdown("Authors: [Aritra Roy Gosthipathy](https://twitter.com/ariG23498) and [Ritwik Raha](https://twitter.com/ritwik_raha)")
 st.markdown("## Description")
 st.markdown("[NeRF](https://arxiv.org/abs/2003.08934) proposes an ingenious way to synthesize novel views of a scene by modelling the volumetric scene function through a neural network.")
 st.markdown("## Interactive Demo")
 
 # download the model:
-
-# from huggingface_hub import snapshot_download
-# snapshot_download(repo_id="Alesteba/your-model-name", local_dir="./nerf")
+# from my own model repo
 
 from huggingface_hub import from_pretrained_keras
-
 nerf_loaded = from_pretrained_keras("Alesteba/NeRF_ficus")
 
-# load the pre-trained model
-# nerf_loaded = tf.keras.models.load_model("nerf", compile=False)
-
 # set the values of r theta phi
 r = 4.0
 theta = st.slider("Enter a value for Θ:", min_value=0.0, max_value=360.0)
@@ -68,12 +54,6 @@ with col2:
     depth = tf.keras.utils.array_to_img(depth[..., None])
     st.image(depth, caption="Depth Map", clamp=True, width=300)
 
-st.markdown("## Tutorials")
-st.markdown("- [Keras](https://keras.io/examples/vision/nerf/)")
-st.markdown("- [PyImageSearch NeRF 1](https://www.pyimagesearch.com/2021/11/10/computer-graphics-and-deep-learning-with-nerf-using-tensorflow-and-keras-part-1/)")
-st.markdown("- [PyImageSearch NeRF 2](https://www.pyimagesearch.com/2021/11/17/computer-graphics-and-deep-learning-with-nerf-using-tensorflow-and-keras-part-2/)")
-st.markdown("- [PyImageSearch NeRF 3](https://www.pyimagesearch.com/2021/11/24/computer-graphics-and-deep-learning-with-nerf-using-tensorflow-and-keras-part-3/)")
-
 st.markdown("## Credits")
 st.markdown("- [PyImageSearch](https://www.pyimagesearch.com/)")
 st.markdown("- [JarvisLabs.ai GPU credits](https://jarvislabs.ai/)")
 