ctn8176 committed on
Commit dc5958a · verified · 1 Parent(s): 6b537f0

Update app.py

Files changed (1)
app.py +8 -8
app.py CHANGED
@@ -2,7 +2,6 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 import requests
-from IPython.display import display, Image
 
 model_name = "Writer/palmyra-small"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -44,12 +43,12 @@ def get_movie_info(movie_title):
         year = details_data.get("release_date", "Unknown Year")[:4]
         genre = ", ".join(genre["name"] for genre in details_data.get("genres", []))
         tmdb_link = f"https://www.themoviedb.org/movie/{movie_id}"
-        poster_path = details_data.get("poster_path")
+        # poster_path = details_data.get("poster_path")
 
         # Convert poster_path to a complete image URL
-        image_url = f"https://image.tmdb.org/t/p/w500{poster_path}" if poster_path else ""
+        # image_url = f"https://image.tmdb.org/t/p/w500{poster_path}" if poster_path else ""
 
-        return f"Title: {title}, Year: {year}, Genre: {genre}\nFind more info here: {tmdb_link}", image_url
+        return f"Title: {title}, Year: {year}, Genre: {genre}\nFind more info here: {tmdb_link}"
 
     else:
         return "Movie not found", ""
@@ -66,7 +65,7 @@ def generate_response(prompt):
     )
 
     # Call the get_movie_info function to enrich the response
-    movie_info, image_url = get_movie_info(prompt)
+    movie_info = get_movie_info(prompt)
 
     # Concatenate the movie info with the input template
     input_text_template += f" Movie Info: {movie_info}"
@@ -85,10 +84,10 @@ def generate_response(prompt):
 
     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
 
-    # Embed image directly in the response
-    display(Image(url=image_url, alt="Movie Poster"))
+    # Display image directly in the chat
+    # image_component = gr.Image(image_url)
 
-    return f"Movie Info:\n{movie_info}\n\nGenerated Response:\n{generated_text}"
+    return f"Movie Info:\n{movie_info}\n\nGenerated Response:\n{generated_text}\n"
 
 # Define chat function for gr.ChatInterface
 def chat_function(message, history):
@@ -100,3 +99,4 @@ def chat_function(message, history):
 chat_interface = gr.ChatInterface(chat_function)
 chat_interface.launch(share=True)  # Added share=True to create a public link
 
+
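
For context, a minimal sketch of what the full get_movie_info function plausibly looks like after this commit. The diff only shows the tail of the function, so the TMDB_API_KEY placeholder, the search and details requests, and the single-string not-found fallback below are assumptions rather than part of the change; it assumes the standard TMDB v3 search/movie and movie/{id} endpoints.

import requests

TMDB_API_KEY = "YOUR_TMDB_API_KEY"  # hypothetical placeholder; key handling is not shown in the diff

def get_movie_info(movie_title):
    # Resolve the title to a TMDB movie ID via the v3 search endpoint (assumed, not in the diff)
    search_url = "https://api.themoviedb.org/3/search/movie"
    search_data = requests.get(
        search_url, params={"api_key": TMDB_API_KEY, "query": movie_title}
    ).json()

    results = search_data.get("results", [])
    if results:
        movie_id = results[0]["id"]

        # Fetch full details for the first match (assumed, not in the diff)
        details_url = f"https://api.themoviedb.org/3/movie/{movie_id}"
        details_data = requests.get(details_url, params={"api_key": TMDB_API_KEY}).json()

        title = details_data.get("title", "Unknown Title")
        year = details_data.get("release_date", "Unknown Year")[:4]
        genre = ", ".join(genre["name"] for genre in details_data.get("genres", []))
        tmdb_link = f"https://www.themoviedb.org/movie/{movie_id}"

        # After this commit the success path returns a single string (no image URL)
        return f"Title: {title}, Year: {year}, Genre: {genre}\nFind more info here: {tmdb_link}"

    # Assumption: a plain string here keeps the not-found branch consistent with the
    # new single-value call site; the committed code still returns a tuple in this branch
    return "Movie not found"

If the poster is wanted back later, one option (not done in this commit) would be to re-enable the commented-out image_url and embed it in the returned text, for example as a Markdown image that the Gradio Chatbot component can render.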