0
我是 Keras 新手,一直在努力把数据整理成正确的形状。我已经尝试了几个星期,下面是我目前最接近成功的结果。我很确定我只是在强行让代码跑起来,不停地调整数据的形状。几个问题:如何正确设置 input_shape 或 input_dim?
- 模型,损失,优化或激活函数是否确定input_shape或input_dim需要的形状/尺寸?
如果不是,应该如何把数据整形成正确的形式?
我试图将数据整形为(1,1,59),但是我会接到抱怨说目标数据的形状是(1,1,19)。现在我知道如何去做的唯一方法是将数据削减一半,使其成为一个平坦的形状,但我想只使用20%的数据来创建一个新的集合。
我的代码:我想做的是让模型学习 1–100 的序列,然后在给定某个数时预测下一个数应该是什么。
# Tool setup
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Setup our dataset and testset.
dataset = list(range(60))       # Training sequence: 0..59
validset = list(range(60, 80))  # Validation sequence: 60..79
testset = list(range(80, 100))  # Test sequence: 80..99
# Preprocess Data: the task is next-value prediction, so each target is
# simply the successor of its input element.
X_train = dataset[:-1]  # Drop the last element (it has no successor).
Y_train = dataset[1:]   # Shifted by one: the next value is the target.
# Reshape training data for the Keras LSTM model.
# An LSTM expects 3-D input: (batch_size, time_steps, features).
# Let -1 infer the time-step count from the data length instead of
# hard-coding 59/19, so the code stays correct if the set sizes change.
X_train = np.array(X_train).reshape(1, -1, 1)
Y_train = np.array(Y_train).reshape(1, -1, 1)
# Normalize the Data (optional, currently disabled):
#X_train = np.divide(X_train, 200)
#Y_train = np.divide(Y_train, 200)
X_test = validset[:-1]  # Drop the last element.
Y_test = validset[1:]   # The next element is the prediction target.
X_test = np.array(X_test).reshape(1, -1, 1)
Y_test = np.array(Y_test).reshape(1, -1, 1)
# Build Model
model = Sequential()
# NOTE: an earlier attempt used a softmax activation, which is wrong for
# regression (softmax normalizes outputs across units). Kept for reference,
# fully commented out -- the original paste left the second line live,
# which was a SyntaxError:
#model.add(LSTM(100, input_dim=1, return_sequences=True,
#               activation='softmax'))
# return_sequences=True makes the LSTM emit one output per time step, so
# the Dense(1) layer below predicts a value for every step in the sequence.
model.add(LSTM(100, input_dim=1, return_sequences=True))
model.add(Dense(1))
# Regression problem -> mean-squared-error loss. "accuracy" is not a
# meaningful metric for regression but is kept for the history plots below.
model.compile(loss='mse', optimizer='rmsprop', metrics=["accuracy"])
# Layers must be added before compile() to take effect:
#model.add(Dropout(0.80))
# Train the Model
# NOTE: the keyword argument was renamed nb_epoch -> epochs in Keras 2;
# use the current name so the call works on modern versions.
history = model.fit(X_train, Y_train, validation_data=(X_test, Y_test),
                    epochs=10, batch_size=1, verbose=1)
# The validation set is checked during training to monitor progress, and
# possibly for early stopping, but is never used for gradient descent.
# validation_data is used as held-out validation data and will override
# validation_split.
# validation_data=(X_test, Y_test)
# validation_split is the fraction of the data to use as held-out
# validation data.
# validation_split=0.083
# Render the model graph inline (requires pydot + graphviz).
# NOTE: keras.utils.visualize_util was renamed keras.utils.vis_utils in
# Keras 2 -- use the current module path.
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model).create(prog='dot', format='svg'))
# list all data in history
print(history.history.keys())
# Metric names differ across Keras versions ('acc' in 1.x/2.0, 'accuracy'
# in later releases) -- pick whichever key is actually present so the
# plotting does not raise a KeyError.
acc_key = 'acc' if 'acc' in history.history else 'accuracy'
# summarize history for accuracy
plt.plot(history.history[acc_key])
plt.plot(history.history['val_' + acc_key])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validate'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validate'], loc='upper left')
plt.show()
# Test the Model
# Feed the held-out test values as ONE sequence and let -1 infer the
# time-step count. The original reshape(-1, 5, 1) silently split the 20
# values into 4 batches of 5 steps, contradicting the single-sequence
# setup used for training.
testset = np.array(testset).reshape(1, -1, 1)
predict = model.predict(testset)
# Undo the normalization step (disabled, matching the disabled
# normalization above):
#predict = np.multiply(predict, 200)
predict = predict.reshape(-1)
print(predict[0])