Temuzin64 committed
Commit 43bd0e1 · verified · 1 Parent(s): 7f2a558
Files changed (1)
  1. app.py +30 -42
app.py CHANGED
@@ -2,46 +2,34 @@ import streamlit as st
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-
+# Load model and tokenizer
 model_name = "codellama/CodeLlama-7b-Python-hf"
-model = AutoModelForCausalLM.from_pretrained(model_name)
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-
-device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')
-model = model.to(device)
-
-# prompt = st.text_area("Enter your prompt:")
-
-
-# def translate_text(text, source_lang, target_lang):
-#     tokenizer.src_lang = source_lang
-#     encoded_text = tokenizer(text, return_tensors="pt").to(device)
-
-#     generated_tokens = model.generate(**encoded_text, forced_bos_token_id=tokenizer.lang_code_to_id[target_lang])
-
-#     #Decode the output
-#     translated_text = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
-#     return translated_text
-
-
-st.markdown("### Python Code Helper")
-# source_language = ''
-# target_language = ''
-# source = st.sidebar.selectbox('Source Language', languages)
-# if source:
-#     source_language = lang_dict.get(source)
-#     st.write(source_language)
-
-# target = st.sidebar.selectbox('Target Language', languages)
-# if target:
-#     target_language = lang_dict.get(target)
-#     st.write(target_language)
-
-with st.form(key="myForm"):
-    prompt = st.text_area("Enter your Prompt")
-    submit = st.form_submit_button("Submit", type='primary')
-    if submit and prompt:
-        with st.spinner("Generating Response"):
-            response = model.invoke(prompt)
-            st.write(response)
-
+st.title("Python Code Helper")
+
+try:
+    st.info("Loading model... This may take a few moments.")
+    model = AutoModelForCausalLM.from_pretrained(model_name)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')
+    model = model.to(device)
+    st.success("Model loaded successfully.")
+except Exception as e:
+    st.error(f"Error loading model: {e}")
+    st.stop()
+
+# Input and form handling
+st.markdown("### Python Code Generation")
+with st.form(key="code_form"):
+    prompt = st.text_area("Enter your coding prompt:", height=200)
+    submit = st.form_submit_button("Generate Code")
+
+if submit and prompt.strip():
+    with st.spinner("Generating response..."):
+        try:
+            inputs = tokenizer(prompt, return_tensors="pt").to(device)
+            outputs = model.generate(**inputs, max_length=512, num_return_sequences=1)
+            response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+            st.markdown("### Generated Code:")
+            st.code(response, language="python")
+        except Exception as e:
+            st.error(f"An error occurred: {e}")