PyTorch LSTM Prediction: A PyTorch LSTM Training Example
This article covers building an LSTM in PyTorch for multivariate time-series forecasting, applied to electricity load forecasting. I hope readers who need this will find it useful.
Contents:

I. Preface
II. Data Processing
III. LSTM Model
IV. Training
V. Testing
VI. Source Code and Data
In the previous article, "PyTorch LSTM for Time-Series Forecasting (Load Forecasting)", we used an LSTM for load forecasting, but we simply used past load to predict future load and did not exploit other environmental variables such as temperature and humidity.

This article focuses on building an LSTM in PyTorch for multivariate time-series forecasting.
Articles in this series:

PyTorch LSTM for Multivariate Multi-Step Time-Series Load Forecasting
PyTorch Deep Learning: LSTM from Input to Linear Output
PyTorch LSTM for Time-Series Load Forecasting
PyTorch Bidirectional LSTM for Time-Series Load Forecasting
I. Preface
The dataset consists of electricity load data for a certain region over a certain period of time. Besides the load itself, it also contains environmental information such as temperature and humidity.

This article predicts the load at the next time step from the loads of the previous 24 time steps together with the environmental variables at each of those steps.

II. Data Processing
The code in this article assumes the following imports and globals (the exact checkpoint path LSTM_PATH and the device setup are not shown in the original and are assumptions):

import os
from itertools import chain

import numpy as np
import pandas as pd
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
from scipy.interpolate import make_interp_spline

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
LSTM_PATH = 'model/LSTM.pkl'  # checkpoint path; the exact value is an assumption

def load_data(file_name):
    global MAX, MIN
    df = pd.read_csv(os.path.dirname(os.getcwd()) + '/data/new_data/' + file_name, encoding='gbk')
    columns = df.columns
    df.fillna(df.mean(), inplace=True)
    # min-max normalize the load column (column 1) to [0, 1]
    MAX = np.max(df[columns[1]])
    MIN = np.min(df[columns[1]])
    df[columns[1]] = (df[columns[1]] - MIN) / (MAX - MIN)
    return df
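A hypothetical sanity-check call (my addition, not from the original; the CSV file name is an assumption matching the path logic above):

df = load_data('data.csv')  # expects a CSV under ../data/new_data/
print(df.head())            # the load column (column 1) should now lie in [0, 1]
print(MAX, MIN)             # globals kept for denormalizing predictions later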
class MyDataset(Dataset):
    def __init__(self, data):
        self.data = data

    def __getitem__(self, item):
        return self.data[item]

    def __len__(self):
        return len(self.data)
def nn_seq(file_name, B):
    print('data processing...')
    data = load_data(file_name)
    load = data[data.columns[1]]
    load = load.tolist()
    data = data.values.tolist()
    seq = []
    for i in range(len(data) - 24):
        train_seq = []
        train_label = []
        for j in range(i, i + 24):
            # each step: the load plus 6 environmental variables (columns 2-7)
            x = [load[j]]
            for c in range(2, 8):
                x.append(data[j][c])
            train_seq.append(x)
        train_label.append(load[i + 24])
        train_seq = torch.FloatTensor(train_seq)
        train_label = torch.FloatTensor(train_label).view(-1)
        seq.append((train_seq, train_label))
    # print(seq[:5])
    # 70/30 train/test split
    Dtr = seq[0:int(len(seq) * 0.7)]
    Dte = seq[int(len(seq) * 0.7):len(seq)]
    # truncate both sets to a multiple of the batch size B
    train_len = int(len(Dtr) / B) * B
    test_len = int(len(Dte) / B) * B
    Dtr, Dte = Dtr[:train_len], Dte[:test_len]
    train = MyDataset(Dtr)
    test = MyDataset(Dte)
    Dtr = DataLoader(dataset=train, batch_size=B, shuffle=False, num_workers=0)
    Dte = DataLoader(dataset=test, batch_size=B, shuffle=False, num_workers=0)
    return Dtr, Dte
The code above uses DataLoader to process the raw data, finally producing the datasets Dtr and Dte with batch_size=B, where Dtr is the training set and Dte is the test set.
Printing an arbitrary sample from Dte:
[(tensor([[0.3513, 0.0000, 0.9091, 0.0000, 0.6667, 0.3023, 0.2439],
[0.3333, 0.0000, 0.9091, 0.0435, 0.6667, 0.3023, 0.2439],
[0.3396, 0.0000, 0.9091, 0.0870, 0.6667, 0.3023, 0.2439],
[0.3427, 0.0000, 0.9091, 0.1304, 0.6667, 0.3023, 0.2439],
[0.3838, 0.0000, 0.9091, 0.1739, 0.6667, 0.3023, 0.2439],
[0.3700, 0.0000, 0.9091, 0.2174, 0.6667, 0.3023, 0.2439],
[0.4288, 0.0000, 0.9091, 0.2609, 0.6667, 0.3023, 0.2439],
[0.4474, 0.0000, 0.9091, 0.3043, 0.6667, 0.3023, 0.2439],
[0.4406, 0.0000, 0.9091, 0.3478, 0.6667, 0.3023, 0.2439],
[0.4657, 0.0000, 0.9091, 0.3913, 0.6667, 0.3023, 0.2439],
[0.4540, 0.0000, 0.9091, 0.4348, 0.6667, 0.3023, 0.2439],
[0.4939, 0.0000, 0.9091, 0.4783, 0.6667, 0.3023, 0.2439],
[0.4328, 0.0000, 0.9091, 0.5217, 0.6667, 0.3023, 0.2439],
[0.4238, 0.0000, 0.9091, 0.5652, 0.6667, 0.3023, 0.2439],
[0.4779, 0.0000, 0.9091, 0.6087, 0.6667, 0.3023, 0.2439],
[0.4591, 0.0000, 0.9091, 0.6522, 0.6667, 0.3023, 0.2439],
[0.4651, 0.0000, 0.9091, 0.6957, 0.6667, 0.3023, 0.2439],
[0.5102, 0.0000, 0.9091, 0.7391, 0.6667, 0.3023, 0.2439],
[0.5067, 0.0000, 0.9091, 0.7826, 0.6667, 0.3023, 0.2439],
[0.4635, 0.0000, 0.9091, 0.8261, 0.6667, 0.3023, 0.2439],
[0.4224, 0.0000, 0.9091, 0.8696, 0.6667, 0.3023, 0.2439],
[0.3796, 0.0000, 0.9091, 0.9130, 0.6667, 0.3023, 0.2439],
[0.3292, 0.0000, 0.9091, 0.9565, 0.6667, 0.3023, 0.2439],
[0.2940, 0.0000, 0.9091, 1.0000, 0.6667, 0.3023, 0.2439]]), tensor([0.3675]))]
Each row corresponds to the load and the environmental variables at one time step, so input_size=7.
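As a quick shape check (my addition, not in the original article), one can iterate a single batch from the loader returned by nn_seq:

Dtr, Dte = nn_seq(file_name='data.csv', B=5)  # hypothetical file name and batch size
for seq, label in Dte:
    print(seq.shape)    # torch.Size([5, 24, 7]): batch, 24 time steps, 7 features
    print(label.shape)  # torch.Size([5, 1]): one target load per sample
    break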
III. LSTM Model
The model here is the one from the earlier article in this series, "PyTorch Deep Learning: LSTM from Input to Linear Output" (on understanding LSTM input and output in PyTorch):
class LSTM(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_size, batch_size):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.num_directions = 1  # unidirectional LSTM
        self.batch_size = batch_size
        self.lstm = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, batch_first=True)
        self.linear = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input_seq):
        h_0 = torch.randn(self.num_directions * self.num_layers, self.batch_size, self.hidden_size).to(device)
        c_0 = torch.randn(self.num_directions * self.num_layers, self.batch_size, self.hidden_size).to(device)
        # print(input_seq.size())
        seq_len = input_seq.shape[1]
        # input(batch_size, seq_len, input_size)
        input_seq = input_seq.view(self.batch_size, seq_len, self.input_size)
        # output(batch_size, seq_len, num_directions * hidden_size)
        output, _ = self.lstm(input_seq, (h_0, c_0))
        # print('output.size=', output.size())
        # print(self.batch_size * seq_len, self.hidden_size)
        output = output.contiguous().view(self.batch_size * seq_len, self.hidden_size)  # (5 * 30, 64)
        pred = self.linear(output)
        # print('pred=', pred.shape)
        pred = pred.view(self.batch_size, seq_len, -1)
        # keep only the prediction at the last time step
        pred = pred[:, -1, :]
        return pred
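A minimal sketch (my addition) that runs a dummy batch through the model to confirm the shapes, assuming the device setup from Section II:

model = LSTM(input_size=7, hidden_size=64, num_layers=1, output_size=1, batch_size=5).to(device)
dummy = torch.randn(5, 24, 7).to(device)  # (batch_size, seq_len, input_size)
print(model(dummy).shape)                 # torch.Size([5, 1])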
IV. Training
def LSTM_train(name, b):
    Dtr, Dte = nn_seq(file_name=name, B=b)
    input_size, hidden_size, num_layers, output_size = 7, 64, 1, 1
    model = LSTM(input_size, hidden_size, num_layers, output_size, batch_size=b).to(device)
    loss_function = nn.MSELoss().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.05)
    # training
    epochs = 30
    for i in range(epochs):
        cnt = 0
        print('current epoch:', i)
        for (seq, label) in Dtr:
            cnt += 1
            seq = seq.to(device)
            label = label.to(device)
            y_pred = model(seq)
            loss = loss_function(y_pred, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if cnt % 100 == 0:
                print('epoch', i, ':', cnt - 100, '~', cnt, loss.item())
    state = {'model': model.state_dict(), 'optimizer': optimizer.state_dict()}
    torch.save(state, LSTM_PATH)
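Because both the model and optimizer state dicts are saved, training can later be resumed from the checkpoint; a minimal sketch (my addition, reusing LSTM_PATH from above):

# Recreate model and optimizer with the same hyperparameters, then restore their saved states
model = LSTM(7, 64, 1, 1, batch_size=5).to(device)  # batch_size=5 is an assumed value
optimizer = torch.optim.Adam(model.parameters(), lr=0.05)
checkpoint = torch.load(LSTM_PATH)
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])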
V. Testing
def test(name, b):
    global MAX, MIN
    Dtr, Dte = nn_seq(file_name=name, B=b)
    pred = []
    y = []
    print('loading model...')
    input_size, hidden_size, num_layers, output_size = 7, 64, 1, 1
    model = LSTM(input_size, hidden_size, num_layers, output_size, batch_size=b).to(device)
    model.load_state_dict(torch.load(LSTM_PATH)['model'])
    model.eval()
    print('predicting...')
    for (seq, target) in Dte:
        target = list(chain.from_iterable(target.data.tolist()))
        y.extend(target)
        seq = seq.to(device)
        with torch.no_grad():
            y_pred = model(seq)
            y_pred = list(chain.from_iterable(y_pred.data.tolist()))
            pred.extend(y_pred)
    y, pred = np.array([y]), np.array([pred])
    # undo the min-max normalization
    y = (MAX - MIN) * y + MIN
    pred = (MAX - MIN) * pred + MIN
    print('accuracy:', get_mape(y, pred))
    # plot 150 test points (offset 150:300 of the flattened arrays)
    x = [i for i in range(1, 151)]
    x_smooth = np.linspace(np.min(x), np.max(x), 900)
    y_smooth = make_interp_spline(x, y.T[150:300])(x_smooth)
    plt.plot(x_smooth, y_smooth, c='green', marker='*', ms=1, alpha=0.75, label='true')
    y_smooth = make_interp_spline(x, pred.T[150:300])(x_smooth)
    plt.plot(x_smooth, y_smooth, c='red', marker='o', ms=1, alpha=0.75, label='pred')
    plt.grid(axis='y')
    plt.legend()
    plt.show()
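The helper get_mape is not defined in this excerpt; a minimal implementation consistent with how it is called above (my assumption, not the article's code) would be:

def get_mape(y_true, y_pred):
    # mean absolute percentage error, in percent
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100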
After training for only 30 epochs, the MAPE is 7.83%.
VI. Source Code and Data
The source code and data are available on GitHub: LSTM-Load-Forecasting.
This concludes the detailed walkthrough of building an LSTM in PyTorch for multivariate time-series load forecasting.