brendabor committed
Commit 6868cdb · 1 Parent(s): 4d0de17

Update app.py

Files changed (1)
  1. app.py +29 -4
app.py CHANGED
@@ -5,6 +5,7 @@ import joblib
 import pandas as pd
 import numpy as np
 from sklearn.metrics.pairwise import cosine_similarity
+from sklearn.preprocessing import StandardScaler
 
 # Load the emotion prediction model
 emotion_model = load_model('lstm_model.h5')
@@ -18,9 +19,9 @@ emotion_model = load_model('lstm_model.h5')
 # Load the tokenizer (ensure it's the one used during training)
 tokenizer = joblib.load('tokenizer.pkl')
 
-# Load the dataset and preprocess
+# Load the dataset
 df = pd.read_csv('df1.csv')
-df = df.drop(['Unnamed: 0', 'lyrics_filename', 'analysis_url', 'track_href', "type", "id", "uri", 'mood'], axis=1)
+df = df.drop(['Unnamed: 0', 'lyrics_filename', 'analysis_url', 'track_href', "type", "id", "uri"], axis=1)
 
 # Load the similarity matrix
 similarity_matrix = np.load('similarity_matrix.npy')
@@ -40,6 +41,30 @@ knn = joblib.load('knn_model.joblib')
 # Load the KNN recommendation function
 recommend_knn = joblib.load('recommendation_knn_function.joblib')
 
+# Preprocess for content-based
+audio_features = df[['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',
+                     'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo',
+                     'duration_ms', 'time_signature']]
+mood_cats = df[['mood_cats']]
+
+scaler = StandardScaler()
+audio_features_scaled = scaler.fit_transform(audio_features)
+audio_features_df = pd.DataFrame(audio_features_scaled, columns=audio_features.columns)
+mood_cats_df = pd.DataFrame(mood_cats)
+combined_features_content = pd.concat([mood_cats_df, audio_features_df], axis=1)
+
+# Preprocess for KNN
+audio_features_knn = df[['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',
+                         'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo',
+                         'duration_ms', 'time_signature']]
+mood_cats_knn = df[['mood_cats']]
+
+scaler_knn = StandardScaler()
+audio_features_scaled_knn = scaler_knn.fit_transform(audio_features_knn)
+audio_features_df_knn = pd.DataFrame(audio_features_scaled_knn, columns=audio_features_knn.columns)
+mood_cats_df_knn = pd.DataFrame(mood_cats_knn)
+combined_features_knn = pd.concat([mood_cats_df_knn, audio_features_df_knn], axis=1)
+
 # Set up the title of the app
 st.title('Emotion and Audio Feature-based Song Recommendation System')
 
@@ -48,11 +73,11 @@ query_data = df.iloc[0]
 
 # Process the lyrics
 sequence = tokenizer.texts_to_sequences([query_data['lyrics']])
-padded_sequence = pad_sequences(sequence, maxlen=50) # Adjust the maxlen to match the expected input size
+padded_sequence = pad_sequences(sequence, maxlen=50)
 emotion = emotion_model.predict(padded_sequence).flatten()
 
 # Combine emotion and audio features for recommendation
-combined_features = np.concatenate([emotion, query_data[audio_feature_columns].values])
+combined_features_hybrid = np.concatenate([emotion, query_data[audio_feature_columns].values])
 
 # Generate recommendations using the hybrid model
 hybrid_recs = hybrid_recommendation(song_index=0)
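
Note on the added preprocessing: the content-based and KNN blocks introduced in this commit select the same thirteen audio columns and apply an identical StandardScaler step, so a shared helper would remove the duplication. A minimal sketch, reusing the column list and the mood_cats column from the diff; the helper name build_feature_matrix is illustrative, not from the repository:

import pandas as pd
from sklearn.preprocessing import StandardScaler

AUDIO_COLS = ['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',
              'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo',
              'duration_ms', 'time_signature']

def build_feature_matrix(df):
    # Scale the audio columns and prepend the categorical mood column,
    # mirroring the two per-branch blocks added in the commit.
    scaled = pd.DataFrame(StandardScaler().fit_transform(df[AUDIO_COLS]),
                          columns=AUDIO_COLS, index=df.index)
    return pd.concat([df[['mood_cats']], scaled], axis=1)

combined_features_content = build_feature_matrix(df)
combined_features_knn = build_feature_matrix(df)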
 
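hybrid_recommendation is called with song_index=0 but defined outside the hunks shown here, so its internals are not part of this diff. For orientation, a minimal sketch of a recommender built on the precomputed similarity_matrix the app loads; the ranking logic and the top_n parameter are assumptions, not the repository's implementation:

import numpy as np

similarity_matrix = np.load('similarity_matrix.npy')

def hybrid_recommendation_sketch(song_index, top_n=5):
    # Rank all songs by similarity to the query song and drop the query itself.
    scores = similarity_matrix[song_index]
    ranked = np.argsort(scores)[::-1]
    return [int(i) for i in ranked if i != song_index][:top_n]

hybrid_recs = hybrid_recommendation_sketch(song_index=0)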