habulaj committed on
Commit
786baa3
·
1 Parent(s): dba4913

Update routers/profanity.py

Browse files
Files changed (1) hide show
  1. routers/profanity.py +30 -31
routers/profanity.py CHANGED
@@ -4,46 +4,45 @@ from better_profanity import profanity
4
# Router exposing the profanity-analysis endpoint
router = APIRouter()


# Route that analyzes a text for profanity
@router.get("/profanity/check/")
def check_profanity(text: str = Query(..., description="Text to be analyzed")):
    """
    Checks if the text contains profanity and returns details.

    Raises HTTP 400 when the input is empty or whitespace-only.
    """
    # Guard clause: reject blank input before doing any work
    if not text.strip():
        raise HTTPException(status_code=400, detail="The input text cannot be empty.")

    # Use the library's default censor-word list
    profanity.load_censor_words()

    # Overall detection and censored rendering of the full text
    contains_profanity = profanity.contains_profanity(text)
    censored_text = profanity.censor(text)

    # Tokenize once and reuse for both the flagged-word scan and the count
    tokens = text.split()
    offensive_words = [token for token in tokens if profanity.contains_profanity(token)]
    offensive_word_count = len(offensive_words)
    total_words = len(tokens)

    # Share of offensive tokens, guarding against division by zero
    if total_words > 0:
        offensive_percentage = (offensive_word_count / total_words) * 100
    else:
        offensive_percentage = 0

    return {
        "status_code": 200,
        "message": "Profanity analysis successful.",
        "data": {
            "original_text": text,
            "contains_profanity": contains_profanity,
            "censored_text": censored_text,
            "offensive_words": offensive_words,
            "offensive_word_count": offensive_word_count,
            "total_word_count": total_words,
            "offensive_percentage": round(offensive_percentage, 2),
        },
    }
 
4
# Create a router for the profanity API
router = APIRouter()


@router.get("/profanity/check/")
def check_profanity(text: str = Query(..., description="Text to be checked")):
    """
    Check if the text contains profanity and return details.

    Args:
        text: Query-string text to analyze.

    Returns:
        dict with the original and censored text, the offensive words
        found, their count, the total word count, and the offensive
        percentage (rounded to 2 decimal places).

    Raises:
        HTTPException: 400 if the input is empty or whitespace-only,
            500 on unexpected analysis errors.
    """
    # Reject blank input up front — without this guard an empty string
    # would produce a meaningless zero-word "analysis".
    if not text.strip():
        raise HTTPException(status_code=400, detail="The input text cannot be empty.")

    try:
        # Load default dictionary.
        # NOTE(review): this reloads the word list on every request; if the
        # list is static it could be loaded once at import time — confirm.
        profanity.load_censor_words()

        # Detect profanity
        contains_profanity = profanity.contains_profanity(text)
        censored_text = profanity.censor(text)

        # Extract offensive words
        words = text.split()
        offensive_words = [
            word for word in words if profanity.contains_profanity(word)
        ]

        # Calculate percentage of offensive words
        offensive_word_count = len(offensive_words)
        total_word_count = len(words)
        offensive_percentage = (
            (offensive_word_count / total_word_count) * 100 if total_word_count > 0 else 0
        )

        # Return response (percentage rounded so the API emits a stable,
        # human-friendly value rather than a long float)
        return {
            "original_text": text,
            "contains_profanity": contains_profanity,
            "censored_text": censored_text,
            "offensive_words": offensive_words,
            "offensive_word_count": offensive_word_count,
            "total_word_count": total_word_count,
            "offensive_percentage": round(offensive_percentage, 2),
        }
    except HTTPException:
        # Never convert deliberate HTTP errors into opaque 500s.
        raise
    except Exception as e:
        # Handle unexpected errors gracefully
        raise HTTPException(
            status_code=500,
            detail=f"An error occurred while processing the text: {str(e)}"
        )