LSTM basics (2-D data)
Overview: load the IMDB review dataset, pad every review to a fixed length, reshape the data into 2-D blocks of 500 reviews, and train a two-layer LSTM on them.
# Hyperparameters: vocabulary size and padded review length.
# (These were commented out in the original, making the uses below a NameError.)
num_words = 30000  # keep only the 30k most frequent word indices
maxlen = 200       # pad/truncate every review to 200 tokens

# IMDB sentiment dataset: x_* are lists of word-index sequences, y_* are 0/1 labels.
(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=num_words)

# Right-pad (or truncate) every review to exactly `maxlen` tokens.
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen, padding='post')
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen, padding='post')
print(x_train.shape, ' ', y_train.shape)

# Group reviews into chunks of 500 so each "sample" is a (500, 200) 2-D block
# (25000 reviews -> 50 samples); labels are grouped the same way into (50, 500).
# NOTE(review): this treats unrelated reviews as timesteps of one sequence —
# unusual, but it is what the downstream model expects.
x_train = x_train.reshape(-1, 500, 200)
y_train = y_train.reshape(-1, 500)
def lstm_model():
    """Build and compile a two-layer LSTM over (500, 200) input blocks.

    Returns:
        A compiled ``keras.Sequential`` model whose output is a length-500
        vector of independent sigmoid scores (one per review in the block).
    """
    model = keras.Sequential()
    # First LSTM consumes 500 timesteps of 200-dim vectors and returns the
    # full sequence so the second LSTM can consume it.
    # (Fixed: the original line had a stray markdown placeholder appended,
    # which was a syntax error.)
    model.add(keras.layers.LSTM(64, input_shape=(500, 200), return_sequences=True))
    # Second LSTM collapses the sequence to a single 500-unit sigmoid output:
    # 500 independent 0/1 predictions per sample.
    model.add(keras.layers.LSTM(500, activation='sigmoid', return_sequences=False))
    model.compile(optimizer=keras.optimizers.Adam(),
                  # Fixed: sigmoid outputs with independent binary labels need
                  # binary cross-entropy; the original 'categorical_crossentropy'
                  # assumes a softmax probability distribution and would train
                  # incorrectly here.
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
# Build the model and fit it for 3 epochs, feeding the entire training set
# as a single batch (full-batch gradient descent) without shuffling.
model = lstm_model()
tt = model.fit(
    x_train,
    y_train,
    epochs=3,
    batch_size=len(x_train),  # one batch = whole dataset
    verbose=2,
    shuffle=False,
)

[Site footer] DAMO Developer Matrix — jointly launched by Alibaba DAMO Academy and the Internet Society of China to discuss cutting-edge technology trends and applications, and to build an open developer ecosystem around "AI and new computing".
More recommendations
All comments (0)