adnanaman committed
Commit 4526f9d · verified · 1 Parent(s): 5772d76

Update README.md

Files changed (1)
  1. README.md +37 -10
README.md CHANGED
@@ -71,19 +71,46 @@ For use cases involving other languages or highly specialized product categories
  Use the code below to get started with the model for product classification:

  ```python
- from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
-
- # Load the model and tokenizer
- tokenizer = AutoTokenizer.from_pretrained("Adnan-AI-Labs/DistilBERT-ProductClassifier")
- model = AutoModelForSequenceClassification.from_pretrained("Adnan-AI-Labs/DistilBERT-ProductClassifier")
-
- # Create a pipeline for text classification
- classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
-
- # Example usage
- product_description = "High-resolution digital camera with 20MP sensor."
- result = classifier(product_description)
- print(result)
+ import torch
+ from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
+
+ # Define the model repository name
+ model_name = "Adnan-AI-Labs/DistilBERT-ProductClassifier"
+
+ # Load the tokenizer and model from the Hugging Face Hub
+ try:
+     # Load tokenizer
+     tokenizer = DistilBertTokenizer.from_pretrained(model_name, use_fast=True)
+
+     # Load the model
+     model = DistilBertForSequenceClassification.from_pretrained(model_name)
+
+     print("Model and tokenizer loaded successfully.")
+ except Exception as e:
+     print(f"An error occurred while loading the model: {e}")
+     exit()
+
+ # Test the model with some sample inputs
+ sample_texts = [
+     "estar s20 single uk sim free mobile phone red",
+     "cello c40227dvbt2 40 full hd black led tv",
+ ]
+
+ # Prepare the inputs for the model
+ inputs = tokenizer(sample_texts, padding=True, truncation=True, return_tensors="pt")
+
+ # Make predictions
+ with torch.no_grad():
+     outputs = model(**inputs)
+
+ # Get the predicted class indices
+ predictions = torch.argmax(outputs.logits, dim=1)
+
+ # Print out the predictions
+ for text, pred in zip(sample_texts, predictions):
+     print(f"Text: {text}\nPredicted Class: {pred.item()}\n")
  ```

  # Training Details
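
The updated snippet prints raw class indices. As a minimal follow-up sketch (not part of the commit), those indices can be mapped to human-readable category names, assuming the uploaded config.json populates `id2label` with the real category names rather than the default `LABEL_0`-style placeholders:

```python
# Minimal sketch, not part of the commit: map predicted indices to label names.
# Assumes model.config.id2label holds the real category names; the example
# values in the comment below are hypothetical.
labels = model.config.id2label  # e.g. {0: "Mobile Phones", 1: "TVs", ...} (hypothetical)
for text, pred in zip(sample_texts, predictions):
    print(f"Text: {text}\nPredicted Category: {labels[pred.item()]}\n")
```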