# PriceForecast/八个维度demo.py
import pandas as pd
from datasetsforecast.long_horizon import LongHorizon
# Change this to your own data to try the model
Y_df, _, _ = LongHorizon.load(directory='./', group='ETTm2')
Y_df['ds'] = pd.to_datetime(Y_df['ds'])
# For this exercise we hold out 20% of the dataset for validation and 20% for testing
n_time = len(Y_df.ds.unique())
val_size = int(.2 * n_time)
test_size = int(.2 * n_time)
# Peek at the first two rows of each series
print(Y_df.groupby('unique_id').head(2))
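# Quick sanity check on the loaded data (assumes the standard NeuralForecast
# long format with unique_id/ds/y columns):
print(f'{Y_df.unique_id.nunique()} series, {n_time} timestamps '
      f'({Y_df.ds.min()} to {Y_df.ds.max()}), '
      f'val_size={val_size}, test_size={test_size}')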
import matplotlib.pyplot as plt
# Plot the HUFL series of the transformer dataset
# and mark the validation and test splits
u_id = 'HUFL'
x_plot = pd.to_datetime(Y_df[Y_df.unique_id == u_id].ds).reset_index(drop=True)
y_plot = Y_df[Y_df.unique_id == u_id].y.values
x_val = x_plot.iloc[n_time - val_size - test_size]  # First validation timestamp
x_test = x_plot.iloc[n_time - test_size]            # First test timestamp
fig = plt.figure(figsize=(10, 5))
fig.tight_layout()
plt.plot(x_plot, y_plot)
plt.xlabel('Date', fontsize=17)
plt.ylabel('HUFL [15 min temperature]', fontsize=17)
plt.axvline(x_val, color='black', linestyle='-.')
plt.axvline(x_test, color='black', linestyle='-.')
plt.text(x_val, 5, ' Validation', fontsize=12)
plt.text(x_test, 5, ' Test', fontsize=12)
plt.grid()
plt.show()
from ray import tune
from ray.tune.search.hyperopt import HyperOptSearch
from neuralforecast.auto import AutoNHITS, AutoTFT, AutoTSMixer, AutoTSMixerx
from neuralforecast.core import NeuralForecast
from neuralforecast.models import TSMixer, TSMixerx, MLPMultivariate, NHITS
from neuralforecast.losses.pytorch import MAE
horizon = 96 # 24 hours = 96 * 15-min steps
# Use your own config or AutoNHITS.default_config
nhits_config = {
    "learning_rate": tune.choice([1e-3]),               # Initial learning rate
    "max_steps": tune.choice([1000]),                   # Number of SGD steps
    "input_size": tune.choice([5 * horizon]),           # input_size = multiplier * horizon
    "batch_size": tune.choice([7]),                     # Number of series in each batch
    "windows_batch_size": tune.choice([256]),           # Number of windows in each batch
    "n_pool_kernel_size": tune.choice([[2, 2, 2], [16, 8, 1]]),               # MaxPool kernel size per stack
    "n_freq_downsample": tune.choice([[168, 24, 1], [24, 12, 1], [1, 1, 1]]), # Interpolation expressivity ratios
    "activation": tune.choice(['ReLU']),                # Non-linear activation
    "n_blocks": tune.choice([[1, 1, 1]]),               # Blocks per each of the 3 stacks
    "mlp_units": tune.choice([[[512, 512], [512, 512], [512, 512]]]),         # Two 512-unit layers per block, per stack
    "interpolation_mode": tune.choice(['linear']),      # Multi-step interpolation mode
    "val_check_steps": tune.choice([100]),              # Compute validation every 100 steps
    "random_seed": tune.randint(1, 10),
}
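# As the comment above notes, you can start from the library's built-in search
# space instead of a hand-written one (a sketch; AutoNHITS.default_config is
# the class-level Ray search space shipped with neuralforecast):
print(AutoNHITS.default_config)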
tft_config = {
    "input_size": tune.choice([horizon]),
    "hidden_size": tune.choice([32]),
    "n_head": tune.choice([2]),
    "learning_rate": tune.loguniform(1e-4, 1e-1),
    "scaler_type": tune.choice(['robust', 'standard']),
    "max_steps": tune.choice([500, 1000]),
    "windows_batch_size": tune.choice([32]),
    "check_val_every_n_epoch": tune.choice([100]),
    "random_seed": tune.randint(1, 20),
}
input_size = horizon  # NOTE: undefined in the original snippet; a horizon-sized window is an assumption
tsmixer_config = {
    "input_size": input_size,                      # Size of input window
    "max_steps": tune.choice([500, 1000, 2000]),   # Number of training iterations
    "val_check_steps": 100,                        # Compute validation every x steps
    "early_stop_patience_steps": 5,                # Early-stopping patience
    "learning_rate": tune.loguniform(1e-4, 1e-2),  # Initial learning rate
    "n_block": tune.choice([1, 2, 4, 6, 8]),       # Number of mixing layers
    "dropout": tune.uniform(0.0, 0.99),            # Dropout
    "ff_dim": tune.choice([32, 64, 128]),          # Dimension of the feature linear layer
    "scaler_type": 'identity',
}
tsmixerx_config = tsmixer_config.copy()
tsmixerx_config['futr_exog_list'] = ['ex_1', 'ex_2', 'ex_3', 'ex_4']  # These columns must exist in df
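# Caveat: ETTm2 as loaded above has no 'ex_1'..'ex_4' columns, so the
# exogenous models below would fail as-is. A minimal sketch of hypothetical
# calendar features under that assumption (any future-known covariates work):
for i, attr in enumerate(['hour', 'dayofweek', 'day', 'month'], start=1):
    Y_df[f'ex_{i}'] = getattr(Y_df['ds'].dt, attr)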
models = [AutoNHITS(h=horizon,
config=nhits_config,
num_samples=5),
AutoTFT(h=horizon,
loss=MAE(),
config=tft_config,
num_samples=3),
TSMixer(h=horizon,
input_size=input_size,
n_series=7,
max_steps=1000,
val_check_steps=100,
early_stop_patience_steps=5,
scaler_type='identity',
valid_loss=MAE(),
random_seed=12345678,
),
TSMixerx(h=horizon,
input_size=input_size,
n_series=7,
max_steps=1000,
val_check_steps=100,
early_stop_patience_steps=5,
scaler_type='identity',
dropout=0.7,
valid_loss=MAE(),
random_seed=12345678,
futr_exog_list=['ex_1', 'ex_2', 'ex_3', 'ex_4'],
),
MLPMultivariate(h=horizon,
input_size=input_size,
n_series=7,
max_steps=1000,
val_check_steps=100,
early_stop_patience_steps=5,
scaler_type='standard',
hidden_size=256,
valid_loss=MAE(),
random_seed=12345678,
),
NHITS(h=horizon,
input_size=horizon,
max_steps=1000,
val_check_steps=100,
early_stop_patience_steps=5,
scaler_type='robust',
valid_loss=MAE(),
random_seed=12345678,
),
AutoTSMixer(h=horizon,
n_series=7,
loss=MAE(),
config=tsmixer_config,
num_samples=10,
search_alg=HyperOptSearch(),
backend='ray',
valid_loss=MAE()),
AutoTSMixerx(h=horizon,
n_series=7,
loss=MAE(),
config=tsmixerx_config,
num_samples=10,
search_alg=HyperOptSearch(),
backend='ray',
valid_loss=MAE())]
nf = NeuralForecast(
models=models,
freq='15min')
Y_hat_df = nf.cross_validation(df=Y_df, val_size=val_size,
test_size=test_size, n_windows=None)
# Inspect the best hyperparameters found for AutoNHITS
print(nf.models[0].results.get_best_result().config)
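# The same pattern works for every Auto* model in the list (a minimal sketch;
# `results` is the Ray ResultGrid each Auto model keeps after tuning, and the
# hasattr check skips the non-Auto models):
for model in nf.models:
    if hasattr(model, 'results') and model.results is not None:
        print(type(model).__name__, model.results.get_best_result().config)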
y_true = Y_hat_df.y.values
y_hat = Y_hat_df['AutoNHITS'].values
n_series = len(Y_df.unique_id.unique())
y_true = y_true.reshape(n_series, -1, horizon)
y_hat = y_hat.reshape(n_series, -1, horizon)
print('Parsed results')
print('y_true.shape (n_series, n_windows, n_time_out):\t', y_true.shape)
print('y_hat.shape  (n_series, n_windows, n_time_out):\t', y_hat.shape)
fig, axs = plt.subplots(nrows=3, ncols=1, figsize=(10, 11))
fig.tight_layout()
series = ['HUFL', 'HULL', 'LUFL', 'LULL', 'MUFL', 'MULL', 'OT']
series_idx = 3
for idx, w_idx in enumerate([200, 300, 400]):
    axs[idx].plot(y_true[series_idx, w_idx, :], label='True')
    axs[idx].plot(y_hat[series_idx, w_idx, :], label='Forecast')
    axs[idx].grid()
    axs[idx].set_ylabel(series[series_idx] + f' window {w_idx}',
                        fontsize=17)
    if idx == 2:
        axs[idx].set_xlabel('Forecast Horizon', fontsize=17)
plt.legend()
plt.show()
plt.close()
from neuralforecast.losses.numpy import mae, mse
print('MAE: ', mae(y_true, y_hat))
print('MSE: ', mse(y_true, y_hat))
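# Compare all models on the same cross-validation windows. A minimal sketch,
# assuming every non-key column of Y_hat_df holds one model's forecasts:
model_cols = [c for c in Y_hat_df.columns if c not in ('unique_id', 'ds', 'cutoff', 'y')]
for col in model_cols:
    y_hat_m = Y_hat_df[col].values.reshape(n_series, -1, horizon)
    print(f'{col}\tMAE: {mae(y_true, y_hat_m):.4f}\tMSE: {mse(y_true, y_hat_m):.4f}')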