lgq12697 committed
Commit
44646c0
1 Parent(s): e894332

Update README.md

Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -38,7 +38,7 @@ Here is a simple code for inference:
 ```python
 from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
 
-model_name = 'agront-1b-promoter_strength_leaf'
+model_name = 'agront-1b-promoter_strength_protoplast'
 # load model and tokenizer
 model = AutoModelForSequenceClassification.from_pretrained(f'zhangtaolab/{model_name}', trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained(f'zhangtaolab/{model_name}', trust_remote_code=True)
@@ -47,7 +47,7 @@ tokenizer = AutoTokenizer.from_pretrained(f'zhangtaolab/{model_name}', trust_rem
 sequences = ['TACTCTAATCGTATCAGCTGCACTTGCGTACAGGCTACCGGCGTCCTCAGCCACGTAAGAAAAGGCCCAATAAAGGCCCAACTACAACCAGCGGATATATATACTGGAGCCTGGCGAGATCACCCTAACCCCTCACACTCCCATCCAGCCGCCACCAGGTGCAGAGTGTT',
 'ATTTCAAAACTAGTTTTCTATAAACGAAAACTTATATTTATTCCGCTTGTTCCGTTTGATCTGCTGATTCGACACCGTTTTAACGTATTTTAAGTAAGTATCAGAAATATTAATGTGAAGATAAAAGAAAATAGAGTAAATGTAAAGGAAAATGCATAAGATTTTGTTGA']
 pipe = pipeline('text-classification', model=model, tokenizer=tokenizer,
-trust_remote_code=True, top_k=None)
+trust_remote_code=True, function_to_apply="none")
 results = pipe(sequences)
 print(results)
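Note on the second change: replacing `top_k=None` with `function_to_apply="none"` makes the pipeline return the model's raw, unnormalized output instead of softmax probabilities, which is the natural readout when the head predicts a continuous promoter-strength score. The sketch below continues from the snippet in the diff and shows one way the results might be consumed; the exact output shape (a dict vs. a list of dicts per input) depends on the transformers version and pipeline settings, so the field access here is an assumption, not part of this commit.

```python
# Hedged sketch, continuing from the README snippet above (uses its
# `sequences` and `results` variables).
# Assumption: with function_to_apply="none" each result carries the raw model
# output in 'score' (a promoter-strength value) rather than a probability.
for seq, res in zip(sequences, results):
    # Depending on the transformers version / top_k setting, each result may be
    # a dict or a list of dicts; take the first entry in the latter case.
    entry = res[0] if isinstance(res, list) else res
    print(f"{seq[:20]}...  raw score: {entry['score']:.4f}")
```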