ETSformer.py
import torch
import torch.nn as nn

from layers.Embed import DataEmbedding
from layers.ETSformer_EncDec import EncoderLayer, Encoder, DecoderLayer, Decoder, Transform


class Model(nn.Module):
"""
Paper link: https://arxiv.org/abs/2202.01381
"""

    def __init__(self, configs):
        super(Model, self).__init__()
        self.task_name = configs.task_name
        self.seq_len = configs.seq_len
        self.label_len = configs.label_len
        if self.task_name in ('classification', 'anomaly_detection', 'imputation'):
            self.pred_len = configs.seq_len
        else:
            self.pred_len = configs.pred_len
        assert configs.e_layers == configs.d_layers, "Encoder and decoder layers must be equal"
# Embedding
self.enc_embedding = DataEmbedding(configs.enc_in, configs.d_model, configs.embed, configs.freq,
configs.dropout)
# Encoder
self.encoder = Encoder(
[
EncoderLayer(
configs.d_model, configs.n_heads, configs.enc_in, configs.seq_len, self.pred_len, configs.top_k,
dim_feedforward=configs.d_ff,
dropout=configs.dropout,
activation=configs.activation,
) for _ in range(configs.e_layers)
]
)
# Decoder
self.decoder = Decoder(
[
DecoderLayer(
configs.d_model, configs.n_heads, configs.c_out, self.pred_len,
dropout=configs.dropout,
) for _ in range(configs.d_layers)
],
)
        self.transform = Transform(sigma=0.2)

        if self.task_name == 'classification':
            self.act = torch.nn.functional.gelu
            self.dropout = nn.Dropout(configs.dropout)
            self.projection = nn.Linear(configs.d_model * configs.seq_len, configs.num_class)

    def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
        with torch.no_grad():
            if self.training:
                # Data augmentation, applied only during training and without gradients
                x_enc = self.transform.transform(x_enc)
        res = self.enc_embedding(x_enc, x_mark_enc)
        level, growths, seasons = self.encoder(res, x_enc, attn_mask=None)
        growth, season = self.decoder(growths, seasons)
        # Reconstruct the forecast from the last level plus the growth and seasonal terms
        preds = level[:, -1:] + growth + season
        return preds

    def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):
        # Same level/growth/season decomposition as forecasting; `mask` is
        # accepted for a uniform task interface but is not used here.
        res = self.enc_embedding(x_enc, x_mark_enc)
        level, growths, seasons = self.encoder(res, x_enc, attn_mask=None)
        growth, season = self.decoder(growths, seasons)
        preds = level[:, -1:] + growth + season
        return preds

    def anomaly_detection(self, x_enc):
        # No time-stamp features are available for this task, so the embedding gets None
        res = self.enc_embedding(x_enc, None)
        level, growths, seasons = self.encoder(res, x_enc, attn_mask=None)
        growth, season = self.decoder(growths, seasons)
        preds = level[:, -1:] + growth + season
        return preds

    def classification(self, x_enc, x_mark_enc):
        res = self.enc_embedding(x_enc, None)
        _, growths, seasons = self.encoder(res, x_enc, attn_mask=None)
        # Sum the per-layer growth and seasonal representations over the input window
        growths = torch.sum(torch.stack(growths, 0), 0)[:, :self.seq_len, :]
        seasons = torch.sum(torch.stack(seasons, 0), 0)[:, :self.seq_len, :]
        enc_out = growths + seasons
        output = self.act(enc_out)  # the encoder/decoder outputs carry no non-linearity, so apply one here
        output = self.dropout(output)
        # Output
        output = output * x_mark_enc.unsqueeze(-1)  # zero out padding embeddings
        output = output.reshape(output.shape[0], -1)  # (batch_size, seq_length * d_model)
        output = self.projection(output)  # (batch_size, num_classes)
        return output

    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
        if self.task_name in ('long_term_forecast', 'short_term_forecast'):
            dec_out = self.forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)
            return dec_out[:, -self.pred_len:, :]  # [B, L, D]
        if self.task_name == 'imputation':
            dec_out = self.imputation(x_enc, x_mark_enc, x_dec, x_mark_dec, mask)
            return dec_out  # [B, L, D]
        if self.task_name == 'anomaly_detection':
            dec_out = self.anomaly_detection(x_enc)
            return dec_out  # [B, L, D]
        if self.task_name == 'classification':
            dec_out = self.classification(x_enc, x_mark_enc)
            return dec_out  # [B, N]
        return None
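

# --- Usage sketch (illustrative, not part of the repository file) ---
# A minimal, hypothetical example of running the model for long-term
# forecasting. `SimpleNamespace` stands in for the argparse config used by
# the repository's run scripts; the field values below are assumptions chosen
# only to make the sketch self-contained, not recommended settings.
if __name__ == '__main__':
    from types import SimpleNamespace

    configs = SimpleNamespace(
        task_name='long_term_forecast',
        seq_len=96, label_len=48, pred_len=24,
        e_layers=2, d_layers=2,              # must match (see the assert above)
        enc_in=7, c_out=7,                   # number of input/output variables
        d_model=512, n_heads=8, d_ff=2048,
        top_k=3,                             # number of top Fourier frequencies (K in the paper)
        dropout=0.1, embed='timeF', freq='h', activation='gelu',
    )
    model = Model(configs).eval()

    batch_size = 4
    x_enc = torch.randn(batch_size, configs.seq_len, configs.enc_in)  # [B, L, D]
    x_mark_enc = torch.randn(batch_size, configs.seq_len, 4)          # assumes 4 time features for freq='h'
    with torch.no_grad():
        # x_dec / x_mark_dec are unused by the forecasting path, so None is fine here
        out = model(x_enc, x_mark_enc, None, None)
    print(out.shape)  # expected: [B, pred_len, D] = [4, 24, 7]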