iterateai committed on
Commit
82f5414
1 Parent(s): 9230022

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -2
README.md CHANGED
@@ -47,6 +47,7 @@ The model is optimized for code generation and cannot be used as chat model.
47
  ## How to Get Started with the Model
48
 
49
  Use the code below to get started with the model.
 
50
  #import model from hugging face repository
51
  import torch
52
  from transformers import (
@@ -58,8 +59,9 @@ from transformers import (
58
  logging
59
  )
60
  model_repo_id ="iterateai/Interplay-AppCoder"
61
-
62
  #### Load the model in FP16
 
63
  iterate_model = AutoModelForCausalLM.from_pretrained(
64
  model_repo_id,
65
  low_cpu_mem_usage=True,
@@ -69,7 +71,7 @@ iterate_model = AutoModelForCausalLM.from_pretrained(
69
  trust_remote_code=True
70
  )
71
  #Note: You can quantize the model using the bnb config parameter to load the model on a T4 GPU
72
-
73
  ### Load tokenizer to save it
74
  tokenizer = AutoTokenizer.from_pretrained(model_repo_id, trust_remote_code=True)
75
  tokenizer.pad_token = tokenizer.eos_token
 
47
  ## How to Get Started with the Model
48
 
49
  Use the code below to get started with the model.
50
+ ```
51
  #import model from hugging face repository
52
  import torch
53
  from transformers import (
 
59
  logging
60
  )
61
  model_repo_id ="iterateai/Interplay-AppCoder"
62
+ ```
63
  #### Load the model in FP16
64
+ ```
65
  iterate_model = AutoModelForCausalLM.from_pretrained(
66
  model_repo_id,
67
  low_cpu_mem_usage=True,
 
71
  trust_remote_code=True
72
  )
73
  #Note: You can quantize the model using the bnb config parameter to load the model on a T4 GPU
74
+ ```
75
  ### Load tokenizer to save it
76
  tokenizer = AutoTokenizer.from_pretrained(model_repo_id, trust_remote_code=True)
77
  tokenizer.pad_token = tokenizer.eos_token