pravin007s committed on
Commit
e5fd579
·
verified ·
1 Parent(s): 6c613bf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -29
app.py CHANGED
@@ -1,8 +1,6 @@
1
  # -*- coding: utf-8 -*-
2
  """gen ai project f.ipynb
3
-
4
  Automatically generated by Colab.
5
-
6
  Original file is located at
7
  https://colab.research.google.com/drive/1iF7hdOjWNeFUtGvUYdaFsBErJGnY1h5J
8
  """
@@ -20,9 +18,13 @@ if hf_token:
20
  else:
21
  raise ValueError("Hugging Face token not found in environment variables.")
22
 
23
-
24
  # Import necessary libraries
25
  from transformers import MarianMTModel, MarianTokenizer, pipeline
 
 
 
 
 
26
 
27
  # Load the translation model and tokenizer
28
  model_name = "Helsinki-NLP/opus-mt-mul-en"
@@ -35,31 +37,19 @@ translator = pipeline("translation", model=model, tokenizer=tokenizer)
35
  # Function for translation
36
  def translate_text(tamil_text):
37
  try:
38
- # Perform translation
39
  translation = translator(tamil_text, max_length=40)
40
  translated_text = translation[0]['translation_text']
41
  return translated_text
42
  except Exception as e:
43
  return f"An error occurred: {str(e)}"
44
 
45
- # Test translation with example Tamil text
46
- tamil_text = "மழையுடன் ஒரு பூ" # "A flower with rain"
47
- translated_text = translate_text(tamil_text)
48
- print(f"Translated Text: {translated_text}")
49
-
50
- import requests
51
- import io
52
- from PIL import Image
53
- import matplotlib.pyplot as plt
54
-
55
  # API credentials and endpoint
56
  API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
57
- headers = {"Authorization": "Bearer HF_TOKEN"}
58
 
59
  # Function to send payload and generate image
60
  def generate_image(prompt):
61
  try:
62
- # Send request to API
63
  response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
64
 
65
  # Check if the response is successful
@@ -73,13 +63,15 @@ def generate_image(prompt):
73
  return image
74
  except Exception as e:
75
  print(f"Error opening image: {e}")
 
76
  else:
77
- # Handle non-200 responses
78
  print(f"Failed to get image: Status code {response.status_code}")
79
  print("Response content:", response.text) # Print response for debugging
 
80
 
81
  except Exception as e:
82
  print(f"An error occurred: {e}")
 
83
 
84
  # Display image
85
  def show_image(image):
@@ -90,16 +82,8 @@ def show_image(image):
90
  else:
91
  print("No image to display")
92
 
93
- # Test the function with a prompt
94
- prompt = "A flower with rain"
95
- image = generate_image(prompt)
96
-
97
- # Display the generated image
98
- show_image(image)
99
-
100
- from transformers import AutoTokenizer, AutoModelForCausalLM
101
-
102
  # Load GPT-Neo model for creative text generation
 
103
  gpt_neo_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
104
  gpt_neo_model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
105
 
@@ -110,8 +94,6 @@ def generate_creative_text(translated_text):
110
  creative_text = gpt_neo_tokenizer.decode(generated_text_ids[0], skip_special_tokens=True)
111
  return creative_text
112
 
113
- import gradio as gr
114
-
115
  # Function to handle the full workflow
116
  def translate_generate_image_and_text(tamil_text):
117
  # Step 1: Translate Tamil text to English
@@ -135,4 +117,4 @@ interface = gr.Interface(
135
  )
136
 
137
  # Launch Gradio app
138
- interface.launch()
 
1
  # -*- coding: utf-8 -*-
2
  """gen ai project f.ipynb
 
3
  Automatically generated by Colab.
 
4
  Original file is located at
5
  https://colab.research.google.com/drive/1iF7hdOjWNeFUtGvUYdaFsBErJGnY1h5J
6
  """
 
18
  else:
19
  raise ValueError("Hugging Face token not found in environment variables.")
20
 
 
21
  # Import necessary libraries
22
  from transformers import MarianMTModel, MarianTokenizer, pipeline
23
+ import requests
24
+ import io
25
+ from PIL import Image
26
+ import matplotlib.pyplot as plt
27
+ import gradio as gr
28
 
29
  # Load the translation model and tokenizer
30
  model_name = "Helsinki-NLP/opus-mt-mul-en"
 
37
  # Function for translation
38
  def translate_text(tamil_text):
39
  try:
 
40
  translation = translator(tamil_text, max_length=40)
41
  translated_text = translation[0]['translation_text']
42
  return translated_text
43
  except Exception as e:
44
  return f"An error occurred: {str(e)}"
45
 
 
 
 
 
 
 
 
 
 
 
46
  # API credentials and endpoint
47
  API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
48
+ headers = {"Authorization": f"Bearer {hf_token}"}
49
 
50
  # Function to send payload and generate image
51
  def generate_image(prompt):
52
  try:
 
53
  response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
54
 
55
  # Check if the response is successful
 
63
  return image
64
  except Exception as e:
65
  print(f"Error opening image: {e}")
66
+ return None
67
  else:
 
68
  print(f"Failed to get image: Status code {response.status_code}")
69
  print("Response content:", response.text) # Print response for debugging
70
+ return None
71
 
72
  except Exception as e:
73
  print(f"An error occurred: {e}")
74
+ return None
75
 
76
  # Display image
77
  def show_image(image):
 
82
  else:
83
  print("No image to display")
84
 
 
 
 
 
 
 
 
 
 
85
  # Load GPT-Neo model for creative text generation
86
+ from transformers import AutoTokenizer, AutoModelForCausalLM
87
  gpt_neo_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
88
  gpt_neo_model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
89
 
 
94
  creative_text = gpt_neo_tokenizer.decode(generated_text_ids[0], skip_special_tokens=True)
95
  return creative_text
96
 
 
 
97
  # Function to handle the full workflow
98
  def translate_generate_image_and_text(tamil_text):
99
  # Step 1: Translate Tamil text to English
 
117
  )
118
 
119
  # Launch Gradio app
120
+ interface.launch()