python Deep learning study notes (6)

Results

[Figures: training and validation curves for the LSTM model]

As the plots show, this result is somewhat better than the SimpleRNN network, mainly because the LSTM suffers much less from the vanishing-gradient problem. The LSTM is also well suited to capturing the global, long-term structure of the reviews.
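For reference, here is a minimal sketch of the kind of IMDB LSTM model those results refer to (the hyperparameters here are assumed, not copied from the earlier note); swapping layers.LSTM for layers.SimpleRNN reproduces the weaker baseline:

from keras.datasets import imdb
from keras.preprocessing import sequence
from keras.models import Sequential
from keras import layers

max_features = 10000  # vocabulary size: keep the 10,000 most common words
maxlen = 500          # truncate each review after 500 words

(x_train, y_train), _ = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)

model = Sequential()
model.add(layers.Embedding(max_features, 32))
model.add(layers.LSTM(32))  # replace with layers.SimpleRNN(32) for the weaker baseline
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.2)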

Three advanced techniques for improving the performance and generalization power of recurrent neural networks:

Recurrent dropout: a specific, built-in way to use dropout inside recurrent layers to fight overfitting.

Stacking recurrent layers: this increases the representational power of the network (at the cost of a higher computational load).

Bidirectional recurrent layers: these present the same information to a recurrent network in different ways, increasing accuracy and mitigating forgetting problems (see the sketch after this list; the temperature code further down does not use this technique).
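Since the temperature-forecasting code below only demonstrates the first two techniques, here is a minimal sketch of how a bidirectional layer would be added in Keras (the layer sizes are illustrative, and the 14-feature input shape assumes the Jena data used below):

from keras.models import Sequential
from keras import layers

model = Sequential()
# Bidirectional wraps a recurrent layer, runs it over the input sequence both
# chronologically and antichronologically, and merges the two representations
model.add(layers.Bidirectional(layers.GRU(32), input_shape=(None, 14)))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mae')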

A gated recurrent unit (GRU) layer works on the same principle as an LSTM, but it is somewhat streamlined and therefore cheaper to run (although it may have less representational power than an LSTM). GRU layers are typically better at remembering recent data than the distant past.
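To make "streamlined" concrete, the sketch below compares the parameter counts of same-width LSTM and GRU layers: the GRU uses three weight groups against the LSTM's four, so it carries roughly three quarters of the parameters (the exact figure depends on the Keras version):

from keras.models import Sequential
from keras import layers

def count_params(layer_cls):
    # One recurrent layer of width 32 on 14 input features (as in the Jena data below)
    model = Sequential()
    model.add(layer_cls(32, input_shape=(None, 14)))
    return model.count_params()

print('LSTM:', count_params(layers.LSTM))  # 4 weight groups (3 gates + cell update)
print('GRU: ', count_params(layers.GRU))   # 3 weight groups, roughly 3/4 of the LSTM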

Next, these techniques are applied to the temperature-forecasting problem.

import os

import numpy as np
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop

data_dir = 'E:\\study\\dataset'
fname = os.path.join(data_dir, 'jena_climate_2009_2016.csv')

# Read the Jena weather CSV and drop the header row
with open(fname) as f:
    data = f.read()
lines = data.split('\n')
header = lines[0].split(',')
lines = lines[1:]
print(header)
print(len(lines))

# Convert the data into a NumPy array (the first column, the date, is dropped)
float_data = np.zeros((len(lines), len(header) - 1))
for i, line in enumerate(lines):
    values = [float(x) for x in line.split(',')[1:]]
    float_data[i, :] = values

# Plot the temperature series over the whole period
temp = float_data[:, 1]
plt.plot(range(len(temp)), temp)
plt.show()

# Plot the temperature over the first 10 days (one observation every 10 minutes)
plt.plot(range(1440), temp[:1440])
plt.show()

# Standardize the data, using the first 200,000 timesteps as training data
mean = float_data[:200000].mean(axis=0)
float_data -= mean
std = float_data[:200000].std(axis=0)
float_data /= std


# Generator yielding batches of timeseries samples and their targets
def generator(data, lookback, delay, min_index, max_index, shuffle=False, batch_size=128, step=6):
    """
    :param data: the original array of floating-point data
    :param lookback: how many timesteps back the input data should go
    :param delay: how many timesteps in the future the target should be
    :param min_index: index in the data array delimiting which timesteps to draw from
    :param max_index: index in the data array delimiting which timesteps to draw from
    :param shuffle: whether to shuffle the samples or draw them in chronological order
    :param batch_size: the number of samples per batch
    :param step: the period, in timesteps, at which the data is sampled
    :return: batches of (samples, targets)
    """
    if max_index is None:
        max_index = len(data) - delay - 1
    i = min_index + lookback
    while True:
        if shuffle:
            rows = np.random.randint(min_index + lookback, max_index, size=batch_size)
        else:
            if i + batch_size >= max_index:
                i = min_index + lookback
            rows = np.arange(i, min(i + batch_size, max_index))
            i += len(rows)
        samples = np.zeros((len(rows), lookback // step, data.shape[-1]))
        targets = np.zeros((len(rows),))
        for j, row in enumerate(rows):
            indices = range(row - lookback, row, step)
            samples[j] = data[indices]
            targets[j] = data[row + delay][1]
        yield samples, targets


# Prepare the training, validation, and test generators
lookback = 1440  # observations go back 10 days
step = 6         # one data point per hour
delay = 144      # targets are 24 hours in the future
batch_size = 128

train_gen = generator(float_data, lookback=lookback, delay=delay, min_index=0,
                      max_index=200000, shuffle=True, step=step, batch_size=batch_size)
val_gen = generator(float_data, lookback=lookback, delay=delay, min_index=200001,
                    max_index=300000, step=step, batch_size=batch_size)
test_gen = generator(float_data, lookback=lookback, delay=delay, min_index=300001,
                     max_index=None, step=step, batch_size=batch_size)

# How many batches to draw from each generator to see the entire split once
val_steps = (300000 - 200001 - lookback) // batch_size
test_steps = (len(float_data) - 300001 - lookback) // batch_size


# Densely connected baseline
def get_base_model_history():
    model = Sequential()
    model.add(layers.Flatten(input_shape=(lookback // step, float_data.shape[-1])))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dense(1))
    model.compile(optimizer=RMSprop(), loss='mae')
    history = model.fit_generator(train_gen, steps_per_epoch=500, epochs=20,
                                  validation_data=val_gen, validation_steps=val_steps)
    return history


# GRU-based model
def get_gru_model_history():
    model = Sequential()
    model.add(layers.GRU(32, input_shape=(None, float_data.shape[-1])))
    model.add(layers.Dense(1))
    model.compile(optimizer=RMSprop(), loss='mae')
    history = model.fit_generator(train_gen, steps_per_epoch=500, epochs=20,
                                  validation_data=val_gen, validation_steps=val_steps)
    return history


# GRU-based model regularized with dropout
def get_gru_model_with_dropout_history():
    model = Sequential()
    model.add(layers.GRU(32, dropout=0.2, recurrent_dropout=0.2,
                         input_shape=(None, float_data.shape[-1])))
    model.add(layers.Dense(1))
    model.compile(optimizer=RMSprop(), loss='mae')
    history = model.fit_generator(train_gen, steps_per_epoch=500, epochs=40,
                                  validation_data=val_gen, validation_steps=val_steps)
    model.save('gru_model_with_dropout.h5')
    return history


# Stacked, dropout-regularized GRU model
def get_mul_gru_model_with_dropout_history():
    model = Sequential()
    model.add(layers.GRU(32, dropout=0.1, recurrent_dropout=0.5, return_sequences=True,
                         input_shape=(None, float_data.shape[-1])))
    model.add(layers.GRU(64, activation='relu', dropout=0.1, recurrent_dropout=0.5))
    model.add(layers.Dense(1))
    model.compile(optimizer=RMSprop(), loss='mae')
    history = model.fit_generator(train_gen, steps_per_epoch=500, epochs=40,
                                  validation_data=val_gen, validation_steps=val_steps)
    model.save('mul_gru_model_with_dropout.h5')
    return history


# Plot training and validation loss for a training run
def draw_loss(history):
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(1, len(loss) + 1)
    plt.figure()
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()


draw_loss(get_base_model_history())
draw_loss(history=get_gru_model_history())
draw_loss(history=get_gru_model_with_dropout_history())
draw_loss(history=get_mul_gru_model_with_dropout_history())
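The script above builds test_gen and test_steps but never uses them. A possible follow-up, assuming the model file saved above, is to evaluate the dropout-regularized model on the test split:

from keras.models import load_model

# Load the GRU-with-dropout model saved by the script above
model = load_model('gru_model_with_dropout.h5')

# Average MAE over the test split (a single loss value, since no extra metrics were compiled)
test_mae = model.evaluate_generator(test_gen, steps=test_steps)
print('Test MAE (normalized):', test_mae)

# The data was standardized feature-wise, so multiplying by the temperature
# column's standard deviation converts the error back to degrees Celsius
print('Test MAE (degrees Celsius):', test_mae * std[1])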
