import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

models = [
    "nadiamaqbool81/starcoderbase-1b-hf",
    "nadiamaqbool81/starcoderbase-1b-hf_python",
    "nadiamaqbool81/codet5-large-hf",
    "nadiamaqbool81/codet5-large-hf-python",
    "nadiamaqbool81/llama-2-7b-int4-java-code-1.178k",
    "nadiamaqbool81/llama-2-7b-int4-python-code-510",
]
names = [
    "nadiamaqbool81/starcoderbase-java",
    "nadiamaqbool81/starcoderbase-python",
    "nadiamaqbool81/codet5-java",
    "nadiamaqbool81/codet5-python",
    "nadiamaqbool81/llama-2-java",
    "nadiamaqbool81/llama-2-python",
]

# Wrap each hosted model with Gradio's inference loader. The two LLaMA-2
# entries are listed here as well, but they are served locally (see below).
model_box = [gr.load(f"models/{name}") for name in models]

# Cache for the lazily loaded LLaMA-2 models, so the weights are downloaded
# and quantized only once per process instead of on every request.
python_model, python_tokenizer = None, None
java_model, java_tokenizer = None, None


def the_process(input_text, model_choice):
    global python_model, python_tokenizer, java_model, java_tokenizer
    if model_choice == 5:  # llama-2-python
        if python_model is None:
            python_tokenizer = AutoTokenizer.from_pretrained(models[5])
            python_model = AutoModelForCausalLM.from_pretrained(
                models[5],
                load_in_4bit=True,  # requires bitsandbytes
                torch_dtype=torch.float16,
                device_map={"": 0},
            )
        output = run_predict(input_text, python_model, python_tokenizer)
    elif model_choice == 4:  # llama-2-java
        if java_model is None:
            java_tokenizer = AutoTokenizer.from_pretrained(models[4])
            java_model = AutoModelForCausalLM.from_pretrained(
                models[4],
                load_in_4bit=True,  # requires bitsandbytes
                torch_dtype=torch.float16,
                device_map={"": 0},
            )
        output = run_predict(input_text, java_model, java_tokenizer)
    else:
        # All other models run through the hosted inference wrappers.
        output = model_box[model_choice](input_text)
    return output


def run_predict(text, model, tokenizer):
    # Wrap the prompt in LLaMA-2 instruction tags and return only the part of
    # the generation that follows the closing [/INST] tag.
    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=300)
    result = pipe(f"[INST] {text} [/INST]")
    return result[0]["generated_text"].split("[/INST]")[1]
""") model_choice = gr.Dropdown(label="Select Model", choices=[m for m in names], type="index", interactive=True) input_text = gr.Textbox(label="Input Prompt") output_window = gr.Code(label="Generated Code") interface = gr.Interface(fn=the_process, inputs=[input_text, model_choice], outputs="text") interface.launch()