shainaraza committed on
Commit
3e5bb17
1 Parent(s): 0451811

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -35
app.py CHANGED
@@ -47,41 +47,28 @@ instruction = ("Instruction: As a helpful, respectful and trustworthy debiasing
47
 
48
 
49
  def get_debiased_sequence(prompt):
50
- """Generate a debiased version of the provided text using the debiasing pipeline."""
51
- instruction_prefix = "<s> <<SYS>> {instruction} <</SYS>> [INST]".format(instruction=instruction)
52
- instruction_suffix = "[/INST]</s>"
53
- full_input_text = f"{instruction_prefix}{prompt}{instruction_suffix}"
54
-
55
- # Tokenize the full input text to calculate its length in tokens
56
- input_tokens = debias_tokenizer.encode(full_input_text)
57
-
58
- # Ensure max_length is greater than the number of input tokens
59
- max_length = len(input_tokens) + 50 # Add a buffer to accommodate generation without truncation
60
-
61
- try:
62
- sequences = debias_pipeline(
63
- full_input_text,
64
- do_sample=True,
65
- top_k=10,
66
- num_return_sequences=1,
67
- eos_token_id=debias_tokenizer.eos_token_id,
68
- max_length=max_length, # Updated to use calculated max_length
69
- )
70
-
71
- if sequences:
72
- res = sequences[0]['generated_text']
73
- # Assuming the response also includes the [/INST] tag, split and extract after this tag
74
- result_part = res.split('[/INST]')[-1]
75
- clean_result = ''.join(c for c in result_part if c.isprintable())
76
- return clean_result.strip()
77
- except RuntimeError as e:
78
- if 'CUDA out of memory' in str(e):
79
- torch.cuda.empty_cache() # Try clearing cache to free up memory
80
- return "Error: Out of memory. Please try again with shorter input or less complex instructions."
81
- else:
82
- raise e # Re-raise the exception if it's not a memory error
83
-
84
- return "No output generated. Check model configuration or input."
85
 
86
  # Streamlit interface setup
87
  st.title('UnBIAS App')
 
47
 
48
 
49
def get_debiased_sequence(prompt):
    """
    Generate a debiased version of the provided text using the debiasing pipeline.

    Args:
        prompt (str): Text to be debiased.

    Returns:
        str: Debiased text, or a diagnostic message when the pipeline
        produces no output.
    """
    # Llama-2-style prompt: system block carrying the debiasing instruction,
    # then the user text wrapped in [INST] ... [/INST].
    input_text = f"<s> <<SYS>>{instruction}. {sys_message} <</SYS>> [INST]{prompt} [/INST]"

    # Budget max_length in TOKENS, not whitespace-split words: subword
    # tokenizers emit more tokens than words, so a word-based budget can
    # truncate the generation mid-answer.
    input_token_count = len(tokenizer.encode(input_text))

    sequences = debias_pipeline(
        input_text,
        do_sample=True,
        top_k=10,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
        max_length=input_token_count + 100,  # headroom for the generated answer
    )

    # Guard against an empty result instead of raising IndexError.
    if not sequences:
        return "No output generated. Check model configuration or input."

    res = sequences[0]['generated_text']
    # The model echoes the prompt; keep only the text after the final [/INST].
    result_part = res.split('[/INST]')[-1]
    # Strip non-printable artifacts the model may emit.
    clean_result = ''.join(c for c in result_part if c.isprintable())
    # Drop any trailing incomplete sentence (helper defined elsewhere in app.py
    # — presumably trims text after the last sentence terminator; confirm).
    cleaned_text = re_incomplete_sentence(clean_result.strip())
    return cleaned_text.strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
  # Streamlit interface setup
74
  st.title('UnBIAS App')