iterateai committed on
Commit 2cde19f
1 Parent(s): 82f5414

Update README.md

Files changed (1):
  1. README.md +5 -2
README.md CHANGED
@@ -72,21 +72,24 @@ iterate_model = AutoModelForCausalLM.from_pretrained(
)
# Note: You can quantize the model using the bnb config parameter to load the model on a T4 GPU
```
+ ```
### Load tokenizer to save it
tokenizer = AutoTokenizer.from_pretrained(model_repo_id, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"
+ ```

+ ```
### Inferencing

logging.set_verbosity(logging.CRITICAL)
- #### Sample prompt
+ # Sample prompt
prompt = "Can you provide a python script that uses the YOLOv8 model from the Ultralytics library to detect people in an image, draw green bounding boxes around them, and then save the image?"

pipe = pipeline(task="text-generation", model=iterate_model, tokenizer=tokenizer, max_length=1024)
result = pipe(f"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. ### Instruction: {prompt} ### Response:", temperature=0.1, do_sample=True)
print(result[0]['generated_text'])
+ ```
-
## Sample demo notebook
https://colab.research.google.com/drive/1USuNLFxLex-C5tLHYET_nQfpM4ALCbc5?usp=sharing#scrollTo=lNCZTBj1nBsJ
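
For context on the `# Note` line above: a minimal sketch of what the 4-bit bnb (bitsandbytes) quantized load could look like, assuming the standard `transformers` `BitsAndBytesConfig` API. The exact config is not part of this commit, and `model_repo_id` is the same placeholder used in the README snippet.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

model_repo_id = "..."  # placeholder: use the actual repo id from the snippet above

# 4-bit NF4 quantization; fp16 compute suits the T4, which lacks bfloat16 support.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

# Load the quantized model, letting accelerate place layers automatically.
iterate_model = AutoModelForCausalLM.from_pretrained(
    model_repo_id,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)
```

Loaded this way, the same `pipeline(...)` call from the diff should fit within the T4's 16 GB of VRAM.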