import load_model_pt
import interpret_model_pt


def sub_pipeline(raw_input, pretrained_model):
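    """Load the tokenizer and model for `pretrained_model`, run the prediction
    pipeline on `raw_input`, and compute per-word attribution weights.

    Returns (pipeline_output, word_weightages).
    """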
    tokenizer, model = load_model_pt.load_models_from_pretrained(pretrained_model)
    output_ = load_model_pt.load_pipeline(raw_input, pretrained_model)
    words_weightages = interpret_model_pt.explainer(raw_input, model, tokenizer)
    return output_, words_weightages


def bias_checker(input_statement):
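    """Check `input_statement` for bias.

    A generic bias model screens the statement first; only if it looks biased
    are the specialised political- and gender-bias models run as well.
    Returns (summary, word_attributions).
    """
    # Generic screening model plus two specialised detectors; the gender model
    # is a KoELECTRA (Korean ELECTRA) checkpoint.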
    pretrained_model_basic_check = "valurank/distilroberta-bias"
    pretrained_model_political = "valurank/distilroberta-mbfc-bias"
    pretrained_model_gender = "monologg/koelectra-base-v3-gender-bias"

    raw_input = input_statement

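    # First pass: screen with the generic bias model.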
    output_stmt_zero, words_interpreted = sub_pipeline(raw_input, pretrained_model_basic_check)
    print(output_stmt_zero)  # show the generic model's raw prediction

    return_var = ""
    interpret_var = ""

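    # Escalate to the specialised models when the generic model is confident
    # the statement is BIASED, or is not confident that it is NEUTRAL.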
    if (output_stmt_zero["label"] == "BIASED" and output_stmt_zero["score"] >= 0.7) or (
        output_stmt_zero["label"] == "NEUTRAL" and output_stmt_zero["score"] < 0.6
    ):
        output_stmt_political, words_interpreted_political = sub_pipeline(raw_input, pretrained_model_political)
        output_stmt_gender, words_interpreted_gender = sub_pipeline(raw_input, pretrained_model_gender)

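        # Bundle all three predictions and their word attributions into
        # readable multi-line summaries.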
        return_var = (
            f"Generic: {output_stmt_zero}\n"
            f"Gender: {output_stmt_gender}\n"
            f"Political: {output_stmt_political}"
        )
        interpret_var = (
            f"Generic: {words_interpreted}\n"
            f"Gender: {words_interpreted_gender}\n"
            f"Political: {words_interpreted_political}"
        )
    else:
        return_var = "The statement seems OK as of now. Please input another statement!"
        interpret_var = ""

    return return_var, interpret_var


if __name__ == "__main__":
    input_stmt = "Nevertheless, Trump and other Republicans have tarred the protests as havens for terrorists intent on destroying property."
    result, interpretation = bias_checker(input_stmt)
    print(result)
    print(interpretation)