我正在创建一个程序,它将根据语音数据预测情绪。但我收到此 AttributeError:'NoneType' 对象没有 linecnnhistory=model.fit(x_trainc... 的属性 'items'
我正在创建一个程序,用于根据语音数据预测情绪。但我收到此 AttributeError: 'NoneType' object has no attribute 'items' for the line
cnnhistory=model.fit(x_traincnn, y_train, batch_size=20, epochs=500, validation_data=(x_testcnn, y_test))
我仔细检查了 x_traincnn、x_test、y_traincnn、y_test,它们都不包含任何 None 或 NaN 值。如何解决这个问题?我使用的是来自 Kaggle 的 RAVDESS 情感语音音频数据集,代码基本上来自 此存储库
我只是做了一点修改。同时给出了我的代码。
import numpy as np
import librosa
import glob
import os
from sklearn.model_selection import train_test_split
int2emotion = {
"01": "neutral",
"02": "calm",
"03": "happy",
"04": "sad",
"05": "angry",
"06": "fearful",
"07": "disgust",
"08": "surprised"
}
AVAILABLE_EMOTIONS = {"angry", "sad", "neutral", "happy"}
def extract_feature(file_name, **kwargs):
mfcc = kwargs.get("mfcc")
chroma = kwargs.get("chroma")
mel = kwargs.get("mel")
contrast = kwargs.get("contrast")
tonnetz = kwargs.get("tonnetz")
with soundfile.SoundFile(file_name) as sound_file:
X = sound_file.read(dtype="float32")
sample_rate = sound_file.samplerate
if chroma or contrast:
stft = np.abs(librosa.stft(X))
result = np.array([])
if mfcc:
mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
result = np.hstack((result, mfccs))
if chroma:
chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
result = np.hstack((result, chroma))
if mel:
mel = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)
result = np.hstack((result, mel))
if contrast:
contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T, axis=0)
result = np.hstack((result, contrast))
if tonnetz:
tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T, axis=0)
result = np.hstack((result, tonnetz))
return result
def load_data(test_size=0.2):
X, y = [], []
pathx = "D:\\Programming_related\\PROJECTS\\ALL_PROJECT\\voice-processing-with-ai\\Actors_1\\Actor_*\\*.wav"
for file in glob.glob(pathx):
try:
basename = os.path.basename(file)
print(f"Processing file: {basename}")
emotion = int2emotion[basename.split("-")[2]]
if emotion not in AVAILABLE_EMOTIONS:
print(f"Skipping file: {basename} (emotion {emotion} not in AVAILABLE_EMOTIONS)")
continue
features = extract_feature(file, mfcc=True, chroma=True, mel=True)
X.append(features)
l = {'happy': 0.0, 'sad': 1.0, 'neutral': 3.0, 'angry': 4.0}
y.append(l[emotion])
except Exception as e:
print(f"Error processing file {file}: {e}")
continue
if len(X) == 0:
raise ValueError("No data found. Please check the file paths and ensure the dataset is correctly placed.")
return train_test_split(np.array(X), y, test_size=test_size, random_state=7)
X_train, X_test, y_train, y_test = load_data(test_size=0.25)
print("[+] Number of training samples:", X_train.shape[0])
print("[+] Number of testing samples:", X_test.shape[0])
import numpy as np
X_train = np.asarray(X_train)
y_train= np.asarray(y_train)
X_test=np.array(X_test)
y_test=np.array(y_test)
X_train.shape,y_train.shape,X_test.shape,y_test.shape
x_traincnn = np.expand_dims(X_train, axis=2)
x_testcnn = np.expand_dims(X_test, axis=2)
print("Shapes after expanding dimensions:")
print(f"x_traincnn shape: {x_traincnn.shape}")
print(f"x_testcnn shape: {x_testcnn.shape}")
x_traincnn.shape,x_testcnn.shape
import keras
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.utils import to_categorical
from keras.layers import Input, Flatten, Dropout, Activation
from keras.layers import Conv1D, MaxPooling1D
from keras.models import Model
from keras.callbacks import ModelCheckpoint
model = Sequential()
model.add(Conv1D(128, 5,padding='same',input_shape=(180,1))) #1
model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(MaxPooling1D(pool_size=(8)))
model.add(Conv1D(128, 5,padding='same',)) #2
model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(8)) #3
model.add(Activation('softmax'))
# opt = keras.optimizers.rmsprop(lr=0.00005, rho=0.9, epsilon=None, decay=0.0)
opt = keras.optimizers.RMSprop(learning_rate=0.00005, rho=0.9, epsilon=None, decay=0.0)
model.summary()
model.compile(loss='sparse_categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
cnnhistory=model.fit(x_traincnn, y_train, batch_size=20, epochs=500, validation_data=(x_testcnn, y_test))
loss, acc = model.evaluate(x_testcnn, y_test)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
以下是完整的错误消息:
Cell In[52], line 1
----> 1 cnnhistory=model.fit(x_traincnn, y_train, batch_size=20, epochs=500, validation_data=(x_testcnn, y_test))
File d:\Programming_related\PROJECTS\ALL_PROJECT\voice-processing-with-ai\.conda\lib\site-packages\keras\src\utils\traceback_utils.py:122, in filter_traceback.<locals>.error_handler(*args, **kwargs)
119 filtered_tb = _process_traceback_frames(e.__traceback__)
120 # To get the full stack trace, call:
121 # `keras.config.disable_traceback_filtering()`
--> 122 raise e.with_traceback(filtered_tb) from None
123 finally:
124 del filtered_tb
File d:\Programming_related\PROJECTS\ALL_PROJECT\voice-processing-with-ai\.conda\lib\site-packages\keras\src\trainers\trainer.py:923, in Trainer._pythonify_logs(self, logs)
921 def _pythonify_logs(self, logs):
922 result = {}
--> 923 for key, value in sorted(logs.items()):
924 if isinstance(value, dict):
925 result.update(self._pythonify_logs(value))
AttributeError: 'NoneType' object has no attribute 'items'```