1.数据预处理
本实验使用AMIGOS数据集。首先将GSR进行提取存入到新的csv文件中。通过cvxEDA将其分解为tonic信号和phasic信号。
这样我们就有了gsr原始信号、tonic信号、phasic信号。它们的长度各为5000。将这三个信号按顺序拼接，得到一维的(1,15000)的数据(也就是把这三个信号放到一行里)。
代码
"""Train a 1-D CNN on GSR features from the AMIGOS dataset.

Each CSV row holds three concatenated 5000-sample signals (raw GSR,
tonic, phasic — per the notes above the code) plus a 'species' label
column and an 'id' column.  The script encodes labels, standardizes,
splits off a validation set, reshapes each row into a
(nb_features, 3) multi-channel sequence, and fits a small 1-D CNN.
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.layers import *
from keras.models import *

train = pd.read_csv('F:/Pycharm/py/data_gsr/cvx1_train.csv')
test = pd.read_csv('F:/Pycharm/py/data_gsr/cvx1_test.csv')


def encode(train, test):
    """Integer-encode the 'species' label column and strip id/label columns.

    Returns (train_features, labels, test_features, class_names).
    """
    label_encoder = LabelEncoder().fit(train.species)
    labels = label_encoder.transform(train.species)
    classes = list(label_encoder.classes_)
    train = train.drop(['species', 'id'], axis=1)
    test = test.drop('id', axis=1)
    return train, labels, test, classes


train, labels, test, classes = encode(train, test)

# Standardize train features.
# NOTE(review): the fitted scaler is never applied to `test`; before
# predicting on it, call scaler.transform(test.values) — confirm downstream.
scaler = StandardScaler().fit(train.values)
scaled_train = scaler.transform(train.values)

# Split train data into train and validation (stratified, 10% validation).
sss = StratifiedShuffleSplit(test_size=0.1, random_state=23)
for train_index, valid_index in sss.split(scaled_train, labels):
    X_train, X_valid = scaled_train[train_index], scaled_train[valid_index]
    y_train, y_valid = labels[train_index], labels[valid_index]

nb_features = 5000   # samples per signal channel (raw GSR, tonic, phasic)
nb_channels = 3      # three concatenated signals per row
nb_class = len(classes)

# Reshape each flat (1, 3*nb_features) row into (nb_features, nb_channels):
# channel 0 = raw GSR, channel 1 = tonic, channel 2 = phasic.
# FIX: slice boundaries are now derived from nb_features instead of the
# hard-coded 10000, so changing nb_features keeps the slicing consistent.
X_train_r = np.zeros((len(X_train), nb_features, nb_channels))
for ch in range(nb_channels):
    X_train_r[:, :, ch] = X_train[:, ch * nb_features:(ch + 1) * nb_features]

X_valid_r = np.zeros((len(X_valid), nb_features, nb_channels))
for ch in range(nb_channels):
    X_valid_r[:, :, ch] = X_valid[:, ch * nb_features:(ch + 1) * nb_features]

# Keras model with 1-D convolution layers.
# (Original author's note: more conv layers / filters did not improve accuracy.)
model_m = Sequential()  # define the convolutional neural-network model
model_m.add(Conv1D(100, 100, activation='relu',
                   input_shape=(nb_features, nb_channels)))
model_m.add(Conv1D(64, 100, activation='relu'))
model_m.add(MaxPooling1D(3))
model_m.add(Conv1D(128, 100, activation='relu'))
model_m.add(Conv1D(128, 100, activation='relu'))
model_m.add(GlobalAveragePooling1D())
model_m.add(Dropout(0.5))
# FIX: output width was hard-coded to 3; use nb_class so the softmax layer
# always matches the one-hot label width produced by to_categorical below.
model_m.add(Dense(nb_class, activation='softmax'))

# One-hot encode the integer labels to match categorical_crossentropy.
y_train = np_utils.to_categorical(y_train, nb_class)
y_valid = np_utils.to_categorical(y_valid, nb_class)

sgd = SGD(lr=0.01, nesterov=True, decay=1e-6, momentum=0.9)
model_m.compile(loss='categorical_crossentropy',
                optimizer=sgd,
                metrics=['accuracy'])

# NOTE(review): fit() carves its own 20% validation split from X_train_r;
# the stratified X_valid_r/y_valid built above is unused here — consider
# passing validation_data=(X_valid_r, y_valid) instead. TODO confirm intent.
history = model_m.fit(X_train_r, y_train,
                      batch_size=64,
                      epochs=20,
                      validation_split=0.2)
讯享网

版权声明:本文内容由互联网用户自发贡献,该文观点仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容,请联系我们,一经查实,本站将立刻删除。
如需转载请保留出处:https://51itzy.com/kjqy/135989.html