import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
import matplotlib.pyplot as plt
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import mean_squared_error
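# Load the tab-separated dataset; columns 0-17 are features and columns 18-25 are labels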
data = pd.read_csv('./data.tsv', sep='\t', skipinitialspace=True)
print(data.head())  # inspect the first few rows
# Get features and labels
features = data.iloc[:, :18].values
labels = data.iloc[:, 18:26].values
# Normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
features_scaled = scaler.fit_transform(features)
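# Note: labels are left on their original scale; only the features are normalized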
labels_scaled = labels
# Split into train, validation, and test sets
train_size = int(len(features_scaled) * 0.7)
val_size = int(len(features_scaled) * 0.1)
test_size = len(features_scaled) - train_size - val_size
train_features = features_scaled[:train_size, :]
train_labels = labels_scaled[:train_size]
val_features = features_scaled[train_size:train_size+val_size, :]
val_labels = labels_scaled[train_size:train_size+val_size]
test_features = features_scaled[train_size+val_size:, :]
test_labels = labels_scaled[train_size+val_size:]
# Create LSTM dataset
def create_lstm_dataset(features, labels, time_steps=1):
    # Build sliding windows: each sample is `time_steps` consecutive feature rows,
    # and its target is the label row that immediately follows the window.
    dataX, dataY = [], []
    for i in range(len(features) - time_steps):
        dataX.append(features[i:(i + time_steps), :])
        dataY.append(labels[i + time_steps])
    return np.array(dataX), np.array(dataY)
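# Use a window of the 7 most recent time steps as the input sequence for each sample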
time_steps = 7
trainX, trainY = create_lstm_dataset(train_features, train_labels, time_steps)
valX, valY = create_lstm_dataset(val_features, val_labels, time_steps)
testX, testY = create_lstm_dataset(test_features, test_labels, time_steps)
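# Shapes: *X -> (samples, time_steps, 18), *Y -> (samples, 8)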
# Build the LSTM model
model = Sequential()
model.add(LSTM(128, activation='relu', input_shape=(time_steps, 18)))
model.add(Dense(8)) # 8 output units for 8 labels
learning_rate = 0.001
optimizer = Adam(learning_rate=learning_rate)
model.compile(loss='mean_squared_error', optimizer=optimizer)
# Fit the model
history = model.fit(trainX, trainY, validation_data=(valX, valY), epochs=100, batch_size=32, verbose=1)
train_loss = history.history['loss']
val_loss = history.history['val_loss']
# Plot the training and validation loss curves
plt.plot(train_loss, label='Train Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend()
plt.show()
# Predict on the test set
test_predict = model.predict(testX)
testY = testY.reshape(len(testY), 8)  # ensure targets have shape (samples, 8)
# Accuracy evaluation: mean squared error for each of the 8 output columns
for i in range(8):
    test_predict1 = test_predict[:, i]
    testY1 = testY[:, i]
    MSE = mean_squared_error(testY1, test_predict1)
    print("Mean Squared Error for Column", i + 1, ":", MSE)