PriceForecast/models/nerulforcastmodels.py

import os
import pandas as pd
import numpy as np
import tensorflow as tf
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
import random  # used by the plotting helpers below
from lib.tools import Graphs, mse, rmse, mae, exception_logger
from lib.tools import save_to_database, get_week_date
from lib.dataread import *
from lib.duojinchengpredict import testSetPredict
from neuralforecast import NeuralForecast
from neuralforecast.models import NHITS, Informer, NBEATSx, LSTM, PatchTST, iTransformer, TSMixer
from neuralforecast.models import RNN, GRU, TCN, DeepAR, DilatedRNN, MLP, NBEATS, DLinear, NLinear, TFT, VanillaTransformer
from neuralforecast.models import Autoformer, FEDformer, StemGNN, HINT, TSMixerx, MLPMultivariate, BiTCN, TiDE, DeepNPTS
from tensorflow.keras.losses import MAE
from scipy.stats import spearmanr
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import r2_score
from sklearn import metrics
from reportlab.platypus import Table, SimpleDocTemplate, Paragraph, Image  # report layout classes
from reportlab.lib.pagesizes import letter  # page size (8.5 * 11 inch)
from reportlab.lib.styles import getSampleStyleSheet  # text styles
from reportlab.lib import colors  # color module
from reportlab.graphics.charts.barcharts import VerticalBarChart  # bar chart class
from reportlab.graphics.charts.legends import Legend  # legend class
from reportlab.graphics.shapes import Drawing  # drawing tools
from reportlab.lib.units import cm  # cm unit
from reportlab.pdfbase import pdfmetrics  # font registration
from reportlab.pdfbase.ttfonts import TTFont  # TrueType font class

# Register the font (prepare the .ttf file in advance; if one file needs
# several typefaces, register each of them)
pdfmetrics.registerFont(TTFont('SimSun', 'SimSun.ttf'))
@exception_logger
def ex_Model(df, horizon, input_size, train_steps, val_check_steps, early_stop_patience_steps,
             is_debug, dataset, is_train, is_fivemodels, val_size, test_size, settings, now,
             etadata, modelsindex, data, is_eta, end_time):
    '''
    Model training and prediction.
    :param df: dataset
    horizon                    # forecast horizon (steps ahead)
    input_size                 # length of the input sequence
    train_steps                # training steps, used to cap the number of epochs
    val_check_steps            # validation frequency
    early_stop_patience_steps  # early-stopping patience, in validation checks
    :return: prediction results
    '''
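    # Illustrative call (a sketch only -- the argument values below are
    # assumptions for demonstration, not project defaults):
    #
    #   ex_Model(df, horizon=5, input_size=40, train_steps=500,
    #            val_check_steps=30, early_stop_patience_steps=5,
    #            is_debug=False, dataset='dataset', is_train=True,
    #            is_fivemodels=False, val_size=20, test_size=20,
    #            settings='default', now='2024-11-01-16-38', etadata=etadata,
    #            modelsindex=modelsindex, data=data, is_eta=False,
    #            end_time='2024-11-01')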
    # Column names of the model prediction list
    # columns2 = [
    #     'NHITS', 'Informer', 'LSTM', 'iTransformer', 'TSMixer', 'TSMixerx',
    #     'PatchTST', 'RNN', 'GRU', 'TCN',
    #     # 'DeepAR',
    #     'DeepAR-median', 'DeepAR-lo-90', 'DeepAR-lo-80', 'DeepAR-hi-80', 'DeepAR-hi-90',
    #     'BiTCN', 'DilatedRNN', 'MLP', 'DLinear', 'NLinear', 'TFT', 'FEDformer',
    #     'StemGNN', 'MLPMultivariate', 'TiDE', 'DeepNPT',
    # ]
    df = df.replace(',', '', regex=True)
    df = df.rename(columns={'date': 'ds'})
    df['y'] = pd.to_numeric(df['y'], errors='coerce')
    # errors='coerce' turns invalid dates into NaT
    df['ds'] = pd.to_datetime(df['ds'], errors='coerce')
    # Cast the integer columns of df to float32
    for col in df.select_dtypes(include=['int']).columns:
        df[col] = df[col].astype(np.float32)
    # Set a Chinese font
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
    plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly
    # Use the block below when no feature selection is applied
    df_reg = df
    df_reg.sort_values('ds', inplace=True)
    if is_debug:
        df_reg = df_reg[-1000:-1]
    # The training set ends at 90% of the data
    split_index = int(0.9 * len(df_reg))
    # Split chronologically; note df_test takes the trailing 90% of rows and
    # therefore overlaps df_train (copies avoid SettingWithCopy warnings below)
    df_train = df_reg[:split_index].copy()
    df_test = df_reg[-split_index:].copy()
    df_train['unique_id'] = 1
    df_test['unique_id'] = 1
    # Show the first rows of each split
    config.logger.info("Training set head:")
    config.logger.info(df_train.head())
    config.logger.info("\nTesting set head:")
    config.logger.info(df_test.head())
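    # NeuralForecast expects long-format frames; after the steps above each
    # split looks roughly like this (values are assumptions for illustration):
    #
    #           ds      y  unique_id  feature_1  ...
    #   2024-10-28  71.20          1       0.35  ...
    #   2024-10-29  70.85          1       0.41  ...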
    models = [
        NHITS(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
              scaler_type='standard', activation='ReLU', early_stop_patience_steps=early_stop_patience_steps),
        Informer(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
                 scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        LSTM(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
             scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        iTransformer(h=horizon, input_size=input_size, n_series=1, max_steps=train_steps,
                     scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        TSMixer(h=horizon, input_size=input_size, n_series=1, max_steps=train_steps,
                early_stop_patience_steps=early_stop_patience_steps),
        TSMixerx(h=horizon, input_size=input_size, n_series=1, max_steps=train_steps,
                 early_stop_patience_steps=early_stop_patience_steps),
        PatchTST(h=horizon, input_size=input_size, max_steps=train_steps,
                 scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        RNN(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
            scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        GRU(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
            scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        TCN(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
            scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        # DeepAR(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps, scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        BiTCN(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
              scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        DilatedRNN(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
                   scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        MLP(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
            scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        DLinear(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
                scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        NLinear(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
                scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        TFT(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
            scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        FEDformer(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
                  scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        StemGNN(h=horizon, input_size=input_size, n_series=1, max_steps=train_steps, val_check_steps=val_check_steps,
                scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        MLPMultivariate(h=horizon, input_size=input_size, n_series=1, max_steps=train_steps,
                        val_check_steps=val_check_steps, scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        TiDE(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
             scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        DeepNPTS(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
                 scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        # VanillaTransformer(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps, scaler_type='standard'),  # raised an error
        # Autoformer(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps, scaler_type='standard'),  # raised an error
        NBEATS(h=horizon, input_size=input_size, max_steps=train_steps,
               val_check_steps=val_check_steps, scaler_type='standard'),
        # NBEATSx(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps, scaler_type='standard', activation='ReLU'),  # raised an error
        # HINT(h=horizon),
    ]
    if is_fivemodels:
        # Load the names of the five best models saved earlier
        with open(os.path.join(config.dataset, 'best_modelnames.txt'), 'r', encoding='utf-8') as f:
            best_modelnames = f.readlines()[0]
            config.logger.info(f'获取本地最佳模型名称:{best_modelnames}')
        # Rebuild the models list to keep only those models
        all_models = models
        models = []
        for model in all_models:
            if model._get_name() in best_modelnames:
                models.append(model)

    # Create the NeuralForecast instance and train the models
    # freq = 'B'
    nf = NeuralForecast(models=models, freq=config.freq[:1])
    from joblib import dump, load
    if is_train:
        # Cross-validate the models
        nf_preds = nf.cross_validation(
            df=df_train, val_size=val_size, test_size=test_size, n_windows=None)
        nf_preds.to_csv(os.path.join(
            config.dataset, "cross_validation.csv"), index=False)
        nf_preds = nf_preds.reset_index()
        # Save the models; the file name carries the timestamp, to the minute
        filename = f'{settings}--{now}.joblib'
        # Colons are not valid in file names, replace them
        filename = filename.replace(':', '-')
        dump(nf, os.path.join(config.dataset, filename))
    else:
        # Use glob to find the newest joblib file under the dataset directory
        import glob
        filename = max(glob.glob(os.path.join(
            config.dataset, '*.joblib')), key=os.path.getctime)
        config.logger.info('读取模型:' + filename)
        nf = load(filename)
    # Predict on the test set
    nf_test_preds = nf.cross_validation(
        df=df_test, val_size=val_size, test_size=test_size, n_windows=None)
    # Save the test-set predictions
    nf_test_preds.to_csv(os.path.join(
        config.dataset, "cross_validation.csv"), index=False)
    df_test['ds'] = pd.to_datetime(df_test['ds'], errors='coerce')

    # Forecast future dates
    df_predict = nf.predict(df_test).reset_index()
    # Drop the index column
    if 'index' in df_predict.columns:
        df_predict.drop(columns=['index'], inplace=True)
    # astype returns a new frame, so assign the result back (the original
    # call discarded it)
    df_predict = df_predict.astype(
        {col: 'float32' for col in df_predict.columns if col not in ['ds']})
    # Stamp the prediction time
    df_predict['created_dt'] = end_time

    # Save the predictions
    df_predict.to_csv(os.path.join(config.dataset, "predict.csv"), index=False)
    # Save the predictions to the database
    save_to_database(config.sqlitedb, df_predict, 'predict', end_time)

    # Push the predictions to ETA
    if config.is_update_eta:
        df_predict['ds'] = pd.to_datetime(df_predict['ds'])
        dates = df_predict['ds'].dt.strftime('%Y-%m-%d')
        for m in modelsindex.keys():
            data_list = []  # renamed from `list` to avoid shadowing the builtin
            for date, value in zip(dates, df_predict[m].round(2)):
                data_list.append({'Date': date, 'Value': value})
            data['DataList'] = data_list
            data['IndexCode'] = modelsindex[m]
            data['IndexName'] = f'价格预测{m}模型'
            data['Remark'] = m
            etadata.push_data(data)

    # return nf_test_preds
    return
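    # Sketch of the payload pushed to ETA for each model (the field values are
    # assumptions for illustration; real codes come from `modelsindex`):
    #
    #   data = {
    #       'DataList': [{'Date': '2024-11-01', 'Value': 71.23}, ...],
    #       'IndexCode': 'W001',          # assumed index code
    #       'IndexName': '价格预测NHITS模型',
    #       'Remark': 'NHITS',
    #   }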
@exception_logger
def ex_Model_Juxiting(df, horizon, input_size, train_steps, val_check_steps, early_stop_patience_steps,
                      is_debug, dataset, is_train, is_fivemodels, val_size, test_size, settings, now,
                      etadata, modelsindex, data, is_eta, end_time):
    '''
    Model training and prediction.
    :param df: dataset
    horizon                    # forecast horizon (steps ahead)
    input_size                 # length of the input sequence
    train_steps                # training steps, used to cap the number of epochs
    val_check_steps            # validation frequency
    early_stop_patience_steps  # early-stopping patience, in validation checks
    :return: prediction results
    '''
    # Column names of the model prediction list
    # columns2 = [
    #     'NHITS', 'Informer', 'LSTM', 'iTransformer', 'TSMixer', 'TSMixerx',
    #     'PatchTST', 'RNN', 'GRU', 'TCN',
    #     # 'DeepAR',
    #     'DeepAR-median', 'DeepAR-lo-90', 'DeepAR-lo-80', 'DeepAR-hi-80', 'DeepAR-hi-90',
    #     'BiTCN', 'DilatedRNN', 'MLP', 'DLinear', 'NLinear', 'TFT', 'FEDformer',
    #     'StemGNN', 'MLPMultivariate', 'TiDE', 'DeepNPT',
    # ]
    df = df.replace(',', '', regex=True)
    df = df.rename(columns={'date': 'ds'})
    df['y'] = pd.to_numeric(df['y'], errors='coerce')
    # errors='coerce' turns invalid dates into NaT
    df['ds'] = pd.to_datetime(df['ds'], errors='coerce')
    # Cast the integer columns of df to float32
    for col in df.select_dtypes(include=['int']).columns:
        df[col] = df[col].astype(np.float32)
    # Set a Chinese font
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
    plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly
    # Use the block below when no feature selection is applied
    df_reg = df
    df_reg.sort_values('ds', inplace=True)
    if is_debug:
        df_reg = df_reg[-1000:-1]
    # The training set ends at 90% of the data
    split_index = int(0.9 * len(df_reg))
    # Split chronologically; note df_test takes the trailing 90% of rows and
    # therefore overlaps df_train (copies avoid SettingWithCopy warnings below)
    df_train = df_reg[:split_index].copy()
    df_test = df_reg[-split_index:].copy()
    df_train['unique_id'] = 1
    df_test['unique_id'] = 1
    # Show the first rows of each split
    config.logger.info("Training set head:")
    config.logger.info(df_train.head())
    config.logger.info("\nTesting set head:")
    config.logger.info(df_test.head())
    models = [
        NHITS(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
              scaler_type='standard', activation='ReLU', early_stop_patience_steps=early_stop_patience_steps),
        Informer(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
                 scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        LSTM(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
             scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        iTransformer(h=horizon, input_size=input_size, n_series=1, max_steps=train_steps,
                     scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        TSMixer(h=horizon, input_size=input_size, n_series=1, max_steps=train_steps,
                early_stop_patience_steps=early_stop_patience_steps),
        TSMixerx(h=horizon, input_size=input_size, n_series=1, max_steps=train_steps,
                 early_stop_patience_steps=early_stop_patience_steps),
        PatchTST(h=horizon, input_size=input_size, max_steps=train_steps,
                 scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        RNN(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
            scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        GRU(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
            scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        TCN(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
            scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        # DeepAR(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps, scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        BiTCN(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
              scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        DilatedRNN(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
                   scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        MLP(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
            scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        DLinear(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
                scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        NLinear(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
                scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        TFT(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
            scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        FEDformer(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
                  scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        StemGNN(h=horizon, input_size=input_size, n_series=1, max_steps=train_steps, val_check_steps=val_check_steps,
                scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        MLPMultivariate(h=horizon, input_size=input_size, n_series=1, max_steps=train_steps,
                        val_check_steps=val_check_steps, scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        TiDE(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
             scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        DeepNPTS(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
                 scaler_type='standard', early_stop_patience_steps=early_stop_patience_steps),
        # VanillaTransformer(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps, scaler_type='standard'),  # raised an error
        # Autoformer(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps, scaler_type='standard'),  # raised an error
        # NBEATS(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps, scaler_type='standard'),
        # NBEATSx(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps, scaler_type='standard', activation='ReLU'),  # raised an error
        # HINT(h=horizon),
    ]
    if is_fivemodels:
        # Load the names of the five best models saved earlier
        with open(os.path.join(config.dataset, 'best_modelnames.txt'), 'r', encoding='utf-8') as f:
            best_modelnames = f.readlines()[0]
            config.logger.info(f'获取本地最佳模型名称:{best_modelnames}')
        # Rebuild the models list to keep only those models
        all_models = models
        models = []
        for model in all_models:
            if model._get_name() in best_modelnames:
                models.append(model)

    # Create the NeuralForecast instance and train the models
    # fixed: bare `freq` was undefined here; use config.freq[:1] as in ex_Model
    nf = NeuralForecast(models=models, freq=config.freq[:1])
    from joblib import dump, load
    if is_train:
        # Cross-validate the models
        nf_preds = nf.cross_validation(
            df=df_train, val_size=val_size, test_size=test_size, n_windows=None)
        nf_preds.to_csv(os.path.join(
            config.dataset, "cross_validation.csv"), index=False)
        nf_preds = nf_preds.reset_index()
        # Save the models; the file name carries the timestamp, to the minute
        filename = f'{settings}--{now}.joblib'
        # Colons are not valid in file names, replace them
        filename = filename.replace(':', '-')
        dump(nf, os.path.join(config.dataset, filename))
    else:
        # Use glob to find the newest joblib file under the dataset directory
        import glob
        filename = max(glob.glob(os.path.join(
            config.dataset, '*.joblib')), key=os.path.getctime)
        config.logger.info('读取模型:' + filename)
        nf = load(filename)
    # Predict on the test set
    nf_test_preds = nf.cross_validation(
        df=df_test, val_size=val_size, test_size=test_size, n_windows=None)
    # Save the test-set predictions
    nf_test_preds.to_csv(os.path.join(
        config.dataset, "cross_validation.csv"), index=False)
    df_test['ds'] = pd.to_datetime(df_test['ds'], errors='coerce')

    # Forecast future dates
    df_predict = nf.predict(df_test).reset_index()
    # Drop the index column
    if 'index' in df_predict.columns:
        df_predict.drop(columns=['index'], inplace=True)
    # Replace non-finite values (NA or inf) with 0
    df_predict = df_predict.fillna(0)
    df_predict = df_predict.replace([np.inf, -np.inf], 0)
    # astype returns a new frame, so assign the result back (the original
    # call discarded it)
    df_predict = df_predict.astype(
        {col: 'int' for col in df_predict.columns if col not in ['ds']})
    # Stamp the prediction time
    df_predict['created_dt'] = end_time

    # Save the predictions
    df_predict.to_csv(os.path.join(config.dataset, "predict.csv"), index=False)
    # Save the predictions to the database
    # fixed: bare `sqlitedb` was undefined here; use config.sqlitedb
    save_to_database(config.sqlitedb, df_predict, 'predict', end_time)

    # Push the predictions to ETA
    # fixed: bare `is_update_eta` was undefined here; use config.is_update_eta
    if config.is_update_eta:
        df_predict['ds'] = pd.to_datetime(df_predict['ds'])
        dates = df_predict['ds'].dt.strftime('%Y-%m-%d')
        for m in modelsindex.keys():
            data_list = []  # renamed from `list` to avoid shadowing the builtin
            for date, value in zip(dates, df_predict[m]):
                data_list.append({'Date': date, 'Value': value})
            # only the last (most recent) point is pushed here
            data['DataList'] = [data_list[-1]]
            data['IndexCode'] = modelsindex[m]
            data['IndexName'] = f'聚烯烃价格预测{m}模型'
            data['Remark'] = m
            etadata.push_data(data=data)

    # return nf_test_preds
    return
# Yong'an environment: compute the forecast evaluation metrics
@exception_logger
def model_losss_yongan(sqlitedb, end_time, table_name_prefix):
    global dataset
    global rote
    most_model = [sqlitedb.select_data('most_model', columns=[
        'most_common_model'], order_by='ds desc', limit=1).values[0][0]]
    most_model_name = most_model[0]
    # Prediction data handling (predict)
    # df_combined = loadcsv(os.path.join(config.dataset, "cross_validation.csv"))
    # df_combined = dateConvert(df_combined)
    df_combined = sqlitedb.select_data(
        'accuracy', where_condition=f"created_dt <= '{end_time}'")
    df_combined4 = df_combined.copy()  # back up df_combined; the plots need it later
    # Drop columns with more than 80% missing values
    config.logger.info(df_combined.shape)
    df_combined = df_combined.loc[:, df_combined.isnull().mean() < 0.8]
    config.logger.info(df_combined.shape)
    # Drop rows with missing values
    df_combined.dropna(inplace=True)
    config.logger.info(df_combined.shape)
    # Cast the remaining columns to numeric types
    df_combined = df_combined.astype({col: 'float32' for col in df_combined.columns if col not in [
        'CREAT_DATE', 'ds', 'created_dt']})
    # Use groupby + transform to store each ds group's latest CREAT_DATE in a new column
    df_combined['max_cutoff'] = df_combined.groupby(
        'ds')['CREAT_DATE'].transform('max')
    # Keep only the rows whose CREAT_DATE equals max_cutoff, i.e. the most
    # recent forecast for each ds, while retaining the other columns
    df_combined = df_combined[df_combined['CREAT_DATE']
                              == df_combined['max_cutoff']]
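    # Sketch of the de-duplication above (dates are assumptions for
    # illustration): if ds 2024-11-01 was forecast on both 2024-10-28 and
    # 2024-10-31, max_cutoff is 2024-10-31 for both rows, and only the row
    # created on 2024-10-31 survives the filter.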
    # Drop the columns generated by the model run
    # fixed: without inplace=True the original drop call had no effect
    df_combined.drop(columns=['CREAT_DATE', 'max_cutoff', 'created_dt', 'min_within_quantile',
                              'max_within_quantile', 'id', 'min_price', 'max_price',
                              'LOW_PRICE', 'HIGH_PRICE', 'mean'], inplace=True, errors='ignore')
    # Collect the model names
    modelnames = df_combined.columns.to_list()[1:]
    if 'y' in modelnames:
        modelnames.remove('y')
    df_combined3 = df_combined.copy()  # back up df_combined; the plots need it later
    # Collect each model's MSE, RMSE and MAE in a list
    cellText = []
    # Compute the evaluation metrics for every model
    for model in modelnames:
        modelmse = mse(df_combined['y'], df_combined[model])
        modelrmse = rmse(df_combined['y'], df_combined[model])
        modelmae = mae(df_combined['y'], df_combined[model])
        # modelmape = mape(df_combined['y'], df_combined[model])
        # modelsmape = smape(df_combined['y'], df_combined[model])
        # modelr2 = r2_score(df_combined['y'], df_combined[model])
        cellText.append([model, round(modelmse, 3), round(
            modelrmse, 3), round(modelmae, 3)])
    model_results3 = pd.DataFrame(
        cellText, columns=['模型(Model)', '平均平方误差(MSE)', '均方根误差(RMSE)', '平均绝对误差(MAE)'])
    # Sort by MSE in ascending order
    model_results3 = model_results3.sort_values(
        by='平均平方误差(MSE)', ascending=True)
    model_results3.to_csv(os.path.join(
        config.dataset, "model_evaluation.csv"), index=False)
    modelnames = model_results3['模型(Model)'].tolist()
    allmodelnames = modelnames.copy()
    # Save the names of the five best models
    if len(modelnames) > 5:
        modelnames = modelnames[0:5]
    if config.is_fivemodels:  # fixed: bare `is_fivemodels` was undefined here
        pass
    else:
        with open(os.path.join(config.dataset, "best_modelnames.txt"), 'w') as f:
            f.write(','.join(modelnames) + '\n')
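    # best_modelnames.txt holds a single comma-separated line; an assumed
    # example of its content:
    #
    #   NHITS,Informer,LSTM,TSMixer,PatchTST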
    # Plot predictions against the ground truth
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.figure(figsize=(15, 10))
    for n, model in enumerate(modelnames[:5]):
        plt.subplot(3, 2, n + 1)
        plt.plot(df_combined3['ds'], df_combined3['y'], label='真实值')
        plt.plot(df_combined3['ds'], df_combined3[model], label=model)
        plt.legend()
        plt.xlabel('日期')
        plt.ylabel('价格')
        plt.title(model + '拟合')
    plt.subplots_adjust(hspace=0.5)
    plt.savefig(os.path.join(config.dataset, '预测值与真实值对比图.png'), bbox_inches='tight')
    plt.close()

    # Historical data + forecast data
    # Append the future-date forecast
    df_predict = pd.read_csv(os.path.join(config.dataset, 'predict.csv'))
    df_predict.drop('unique_id', inplace=True, axis=1)
    df_predict.dropna(axis=1, inplace=True)
    try:
        df_predict['ds'] = pd.to_datetime(df_predict['ds'], format=r'%Y-%m-%d')
    except ValueError:
        df_predict['ds'] = pd.to_datetime(df_predict['ds'], format=r'%Y/%m/%d')

    def first_row_to_database(df):
        # Store the first row in the database
        first_row = df.head(1)
        first_row['ds'] = first_row['ds'].dt.strftime('%Y-%m-%d 00:00:00')
        # Save the prediction to the database
        if not sqlitedb.check_table_exists('trueandpredict'):
            first_row.to_sql('trueandpredict',
                             sqlitedb.connection, index=False)
        else:
            for col in first_row.columns:
                sqlitedb.add_column_if_not_exists(
                    'trueandpredict', col, 'TEXT')
            for row in first_row.itertuples(index=False):
                row_dict = row._asdict()
                columns = row_dict.keys()
                check_query = sqlitedb.select_data(
                    'trueandpredict', where_condition=f"ds = '{row.ds}'")
                if len(check_query) > 0:
                    set_clause = ", ".join(
                        [f"{key} = '{value}'" for key, value in row_dict.items()])
                    sqlitedb.update_data(
                        'trueandpredict', set_clause, where_condition=f"ds = '{row.ds}'")
                    continue
                sqlitedb.insert_data('trueandpredict', tuple(
                    row_dict.values()), columns=columns)

    first_row_to_database(df_predict)
    df_combined3 = pd.concat([df_combined3, df_predict]).reset_index(drop=True)

    # Compute each model's absolute error ratio against the best model, then
    # use the configured threshold `rote` to pick which predictions feed the
    # displayed max/min values
    names = []
    names_df = df_combined3.copy()
    for col in allmodelnames:
        names_df[f'{col}-{most_model_name}-误差比例'] = abs(
            names_df[col] - names_df[most_model_name]) / names_df[most_model_name]
        names.append(f'{col}-{most_model_name}-误差比例')
    names_df = names_df[names]

    def add_rote_column(row):
        columns = []
        for r in names_df.columns:
            if row[r] <= rote:
                columns.append(r.split('-')[0])
        return pd.Series([columns], index=['columns'])

    names_df['columns'] = names_df.apply(add_rote_column, axis=1)

    def add_upper_lower_bound(row):
        print(row['columns'])
        print(type(row['columns']))
        # Upper bound: the largest prediction among the selected models
        upper_bound = df_combined3.loc[row.name, row['columns']].max()
        # Lower bound: the smallest prediction among the selected models
        lower_bound = df_combined3.loc[row.name, row['columns']].min()
        return pd.Series([lower_bound, upper_bound], index=['min_within_quantile', 'max_within_quantile'])

    df_combined3[['min_within_quantile', 'max_within_quantile']
                 ] = names_df.apply(add_upper_lower_bound, axis=1)

    def find_closest_values(row):
        x = row.y
        if x is None or np.isnan(x):
            return pd.Series([None, None], index=['min_price', 'max_price'])
        # row = row.drop('ds')
        row = row.values.tolist()
        row.sort()
        print(row)
        # index of x within the sorted row
        index = row.index(x)
        if index == 0:
            return pd.Series([row[index + 1], row[index + 2]], index=['min_price', 'max_price'])
        elif index == len(row) - 1:
            return pd.Series([row[index - 2], row[index - 1]], index=['min_price', 'max_price'])
        else:
            return pd.Series([row[index - 1], row[index + 1]], index=['min_price', 'max_price'])
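    # Sketch of find_closest_values (values assumed for illustration): for a
    # sorted row [70.1, 70.4, 70.9, 71.2] with y = 70.4, it returns the two
    # neighbours min_price = 70.1 and max_price = 70.9; when y is the smallest
    # or largest value, the two values next to it are used instead.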
    def find_most_common_model():
        # The most frequent model names over the last 60 rows
        min_model_max_frequency_model = df_combined3['min_model'].tail(
            60).value_counts().idxmax()
        max_model_max_frequency_model = df_combined3['max_model'].tail(
            60).value_counts().idxmax()
        if min_model_max_frequency_model == max_model_max_frequency_model:
            # Fall back to the second most frequent model over the 60 days
            max_model_max_frequency_model = df_combined3['max_model'].tail(
                60).value_counts().nlargest(2).index[1]
        df_predict['min_model'] = min_model_max_frequency_model
        df_predict['max_model'] = max_model_max_frequency_model
        df_predict['min_within_quantile'] = df_predict[min_model_max_frequency_model]
        df_predict['max_within_quantile'] = df_predict[max_model_max_frequency_model]

    # find_most_common_model()

    df_combined3['ds'] = pd.to_datetime(df_combined3['ds'])
    df_combined3['ds'] = df_combined3['ds'].dt.strftime('%Y-%m-%d')
    df_predict2 = df_combined3.tail(horizon)
    # Save to the database
    if not sqlitedb.check_table_exists(f'{table_name_prefix}accuracy'):
        columns = ','.join(df_combined3.columns.to_list(
        ) + ['id', 'CREAT_DATE', 'min_price', 'max_price', 'LOW_PRICE', 'HIGH_PRICE', 'mean'])
        sqlitedb.create_table('accuracy', columns=columns)
    existing_data = sqlitedb.select_data(table_name="accuracy")
    if not existing_data.empty:
        max_id = existing_data['id'].astype(int).max()
        df_predict2['id'] = range(max_id + 1, max_id + 1 + len(df_predict2))
    else:
        df_predict2['id'] = range(1, 1 + len(df_predict2))
    df_predict2['CREAT_DATE'] = end_time
    save_to_database(sqlitedb, df_predict2, "accuracy", end_time)

    # Accuracy for the previous week
    accuracy_df = sqlitedb.select_data(table_name="accuracy")
    predict_y = accuracy_df.copy()
    # ids = predict_y[predict_y['min_price'].isnull()]['id'].tolist()
    ids = predict_y['id'].tolist()
    # Keep the accuracy baseline consistent with the plotted upper/lower bounds
    # predict_y[['min_price','max_price']] = predict_y[['min_within_quantile','max_within_quantile']]
    # Mean of the five best models:
    # predict_y['min_price'] = predict_y[modelnames].mean(axis=1) - 1
    # predict_y['max_price'] = predict_y[modelnames].mean(axis=1) + 1
    # Mean of the ten best models:
    predict_y['min_price'] = predict_y[allmodelnames[0:10]].mean(axis=1) - 1.5
    predict_y['mean'] = predict_y[allmodelnames[0:10]].mean(axis=1)
    predict_y['max_price'] = predict_y[allmodelnames[0:10]].mean(axis=1) + 1.5
    # Min/max of the ten best models:
    # keep only the allmodelnames that also appear as predict_y columns
    # allmodelnames = [col for col in allmodelnames if col in predict_y.columns]
    # predict_y['min_price'] = predict_y[allmodelnames[0:10]].min(axis=1)
    # predict_y['max_price'] = predict_y[allmodelnames[0:10]].max(axis=1)
    for row_id in ids:  # renamed from `id` to avoid shadowing the builtin
        row = predict_y[predict_y['id'] == row_id]
        try:
            sqlitedb.update_data(
                'accuracy', f"min_price = {row['min_price'].values[0]},max_price = {row['max_price'].values[0]},mean={row['mean'].values[0]}", f"id = {row_id}")
        except Exception:
            config.logger.error(f'更新accuracy表中的min_price,max_price,mean值失败row={row}')
    df = accuracy_df.copy()
    df['ds'] = pd.to_datetime(df['ds'])
    df = df.reindex()

    # Accuracy is 1 when the prediction falls inside the Brent high/low range, else 0
    def is_within_range(row):
        for model in allmodelnames:
            if row['LOW_PRICE'] <= row[model] <= row['HIGH_PRICE']:
                return 1
            else:
                return 0

    # Compute the accuracy of a single row
    def calculate_accuracy(row):
        # Compare the true high/low range with the predicted high/low range
        # Full containment (either interval contains the other):
        if (row['max_price'] >= row['HIGH_PRICE'] and row['min_price'] <= row['LOW_PRICE']) or \
           (row['max_price'] <= row['HIGH_PRICE'] and row['min_price'] >= row['LOW_PRICE']):
            return 1
        # No overlap:
        if row['max_price'] < row['LOW_PRICE'] or \
           row['min_price'] > row['HIGH_PRICE']:
            return 0
        # Partial overlap:
        else:
            sorted_prices = sorted(
                [row['LOW_PRICE'], row['min_price'], row['max_price'], row['HIGH_PRICE']])
            middle_diff = sorted_prices[2] - sorted_prices[1]
            price_range = row['HIGH_PRICE'] - row['LOW_PRICE']
            accuracy = middle_diff / price_range
            return accuracy
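    # Worked example of calculate_accuracy (numbers assumed for illustration):
    # with LOW_PRICE = 70, HIGH_PRICE = 75, min_price = 72, max_price = 78 the
    # intervals overlap partially; sorted_prices = [70, 72, 75, 78],
    # middle_diff = 75 - 72 = 3, price_range = 75 - 70 = 5, accuracy = 0.6.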
    columns = ['HIGH_PRICE', 'LOW_PRICE', 'min_price', 'max_price']
    df[columns] = df[columns].astype(float)
    df['ACCURACY'] = df.apply(calculate_accuracy, axis=1)
    # df['ACCURACY'] = df.apply(is_within_range, axis=1)

    # Compute the weekly accuracy rate and persist it
    def _get_accuracy_rate(df, create_dates, ds_dates, endtime):
        df3 = df.copy()
        df3 = df3[df3['CREAT_DATE'].isin(create_dates)]
        df3 = df3[df3['ds'].isin(ds_dates)]
        accuracy_rote = 0
        for i, group in df3.groupby('CREAT_DATE'):
            accuracy_rote += (group['ACCURACY'].sum() /
                              len(group)) * weight_dict[len(group) - 1]
        accuracy_rote = round(accuracy_rote, 2)
        df4 = pd.DataFrame(columns=['开始日期', '结束日期', '准确率'])
        df4.loc[len(df4)] = {'开始日期': ds_dates[0],
                             '结束日期': ds_dates[-1], '准确率': accuracy_rote}
        df4.to_sql("accuracy_rote", con=sqlitedb.connection,
                   if_exists='append', index=False)

    create_dates, ds_dates = get_week_date(end_time)
    _get_accuracy_rate(df, create_dates, ds_dates, end_time)
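    # The weekly rate is a weighted mean of per-CREAT_DATE accuracies, where
    # weight_dict (imported from lib.dataread) maps a group's size to its
    # weight. Assuming weight_dict = [0.4, 0.3, 0.2, 0.1, ...], a forecast day
    # with 2 rows and mean accuracy 0.5 would contribute 0.5 * 0.3 = 0.15.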
    def _add_abs_error_rate():
        # Deviation ratio of each prediction from the true value
        for model in allmodelnames:
            df_combined3[f'{model}_abs_error_rate'] = abs(
                df_combined3['y'] - df_combined3[model]) / df_combined3['y']
        # Smallest deviation ratio in each row
        min_abs_error_rate_values = df_combined3.apply(
            lambda row: row[[f'{model}_abs_error_rate' for model in allmodelnames]].min(), axis=1)
        # Column name holding each row's smallest deviation ratio
        min_abs_error_rate_column_name = df_combined3.apply(
            lambda row: row[[f'{model}_abs_error_rate' for model in allmodelnames]].idxmin(), axis=1)
        # Strip the suffix to recover the model name
        min_abs_error_rate_column_name = min_abs_error_rate_column_name.map(
            lambda x: x.split('_')[0])
        # Prediction of the model with the smallest deviation ratio
        min_abs_error_rate_predictions = df_combined3.apply(
            lambda row: row[min_abs_error_rate_column_name[row.name]], axis=1)
        # Add that prediction to the DataFrame
        df_combined3['min_abs_error_rate_prediction'] = min_abs_error_rate_predictions
        df_combined3['min_abs_error_rate_column_name'] = min_abs_error_rate_column_name

    # _add_abs_error_rate()

    # Cast the numeric columns of df to float
    for col in df_combined3.columns:
        try:
            if col != 'ds':
                df_combined3[col] = df_combined3[col].astype(float)
                df_combined3[col] = df_combined3[col].round(2)
        except ValueError:
            pass
    df_combined3.to_csv(os.path.join(
        config.dataset, "testandpredict_groupby.csv"), index=False)

    # Historical prices + predicted prices
    sqlitedb.drop_table('testandpredict_groupby')
    df_combined3.to_sql('testandpredict_groupby',
                        sqlitedb.connection, index=False)
    def _plt_predict_ture(df):
        # Plot the last 90 points once the history exceeds 180 rows
        lens = df.shape[0] if df.shape[0] < 180 else 90
        df = df[-lens:]
        # Historical prices
        plt.figure(figsize=(20, 10))
        plt.plot(df['ds'], df['y'], label='真实值')
        # Shade the band between the quantile bounds
        plt.fill_between(df['ds'], df['max_within_quantile'],
                         df['min_within_quantile'], alpha=0.2)
        markers = ['o', 's', '^', 'D', 'v', '*', 'p', 'h', 'H', '+', 'x', 'd']
        random_marker = random.choice(markers)
        for model in modelnames:
            plt.plot(df['ds'][-horizon:], df[model][-horizon:],
                     label=model, marker=random_marker)
        # plt.plot(df_combined3['ds'], df_combined3['min_abs_error_rate_prediction'], label='最小绝对误差', linestyle='--', color='orange')
        # Grid
        plt.grid(True)
        # Annotate the historical values
        for i, j in zip(df['ds'], df['y']):
            plt.text(i, j, str(j), ha='center', va='bottom')
        # Dashed vertical line at the current date
        plt.axvline(x=df['ds'].iloc[-horizon], color='r', linestyle='--')
        plt.legend()
        plt.xlabel('日期')
        plt.ylabel('价格')
        plt.savefig(os.path.join(config.dataset, '历史价格-预测值.png'), bbox_inches='tight')
        plt.close()
    def _plt_modeltopten_predict_ture(df):
        df['max_cutoff'] = df.groupby('ds')['CREAT_DATE'].transform('max')
        df = df[df['CREAT_DATE'] == df['max_cutoff']]
        df['mean'] = df['mean'].astype(float)
        # Plot at most the last 180 points
        lens = df.shape[0] if df.shape[0] < 180 else 180
        df = df[-lens:]
        # Historical prices
        plt.figure(figsize=(20, 10))
        plt.plot(df['ds'], df['y'], label='真实值')
        plt.plot(df['ds'], df['mean'], label='模型前十均值',
                 linestyle='--', color='orange')
        # Shade the band between the price bounds
        plt.fill_between(df['ds'], df['max_price'], df['min_price'], alpha=0.2)
        # markers = ['o', 's', '^', 'D', 'v', '*', 'p', 'h', 'H', '+', 'x', 'd']
        # random_marker = random.choice(markers)
        # for model in allmodelnames:
        # for model in ['BiTCN','RNN']:
        #     plt.plot(df['ds'], df[model], label=model, marker=random_marker)
        # plt.plot(df_combined3['ds'], df_combined3['min_abs_error_rate_prediction'], label='最小绝对误差', linestyle='--', color='orange')
        # Grid
        plt.grid(True)
        # Annotate the historical values
        for i, j in zip(df['ds'], df['y']):
            plt.text(i, j, str(j), ha='center', va='bottom')
        # Dashed vertical line at the current date
        plt.axvline(x=df['ds'].iloc[-horizon], color='r', linestyle='--')
        plt.legend()
        plt.xlabel('日期')
        plt.ylabel('价格')
        plt.savefig(os.path.join(config.dataset, '历史价格-预测值1.png'),
                    bbox_inches='tight')
        plt.close()
    def _plt_predict_table(df):
        # Render the prediction table as an image
        fig, ax = plt.subplots(figsize=(20, 6))
        ax.axis('off')  # hide the axes
        # Round values to 2 decimal places
        df = df.round(2)
        df = df[-horizon:]
        df['Day'] = [f'Day_{i}' for i in range(1, horizon + 1)]
        # Move the Day column to the front
        df = df[['Day'] + list(df.columns[:-1])]
        table = ax.table(cellText=df.values,
                         colLabels=df.columns, loc='center')
        # Widen the table
        table.auto_set_font_size(False)
        table.set_fontsize(10)
        # Table style: the smallest value in a column is marked in green
        plt.savefig(os.path.join(config.dataset, '预测值表格.png'), bbox_inches='tight')
        plt.close()

    def _plt_model_results3():
        # Visualize the evaluation results
        plt.rcParams['font.sans-serif'] = ['SimHei']
        fig, ax = plt.subplots(figsize=(20, 10))
        ax.axis('off')  # hide the axes
        table = ax.table(cellText=model_results3.values,
                         colLabels=model_results3.columns, loc='center')
        # Widen the table
        table.auto_set_font_size(False)
        table.set_fontsize(10)
        # Table style: the smallest value in a column is marked in green
        plt.savefig(os.path.join(config.dataset, '模型评估.png'), bbox_inches='tight')
        plt.close()

    _plt_predict_ture(df_combined3)
    _plt_modeltopten_predict_ture(df_combined4)
    _plt_predict_table(df_combined3)
    _plt_model_results3()
    return model_results3
# Crude oil: compute the forecast evaluation metrics
@exception_logger
def model_losss(sqlitedb, end_time):
    global dataset
    global rote
    # Fetch the best model from the database; if the table does not exist,
    # leave the name empty for now and take the first model from the
    # evaluation ranking later
    try:
        most_model = [sqlitedb.select_data('most_model', columns=[
            'most_common_model'], order_by='ds desc', limit=1).values[0][0]]
        most_model_name = most_model[0]
    except Exception:
        most_model_name = ''
    # Prediction data handling (predict)
    try:
        df_combined = sqlitedb.select_data(
            'accuracy', where_condition=f"created_dt <= '{end_time}'")
        if len(df_combined) < 100:
            # Too few rows: raise so the except branch falls back to the CSV
            # (the original code forced this with `len(df_combined) + ''`)
            raise ValueError('accuracy table has fewer than 100 rows')
    except Exception:
        df_combined = loadcsv(os.path.join(config.dataset, "cross_validation.csv"))
        df_combined = dateConvert(df_combined)
        df_combined['CREAT_DATE'] = df_combined['cutoff']
    df_combined4 = df_combined.copy()  # back up df_combined; the plots need it later
    # Drop columns with more than 80% missing values
    config.logger.info(df_combined.shape)
    df_combined = df_combined.loc[:, df_combined.isnull().mean() < 0.8]
    config.logger.info(df_combined.shape)
    # Drop rows with missing values
    df_combined.dropna(inplace=True)
    config.logger.info(df_combined.shape)
    # Cast the remaining columns to numeric types
    df_combined = df_combined.astype({col: 'float32' for col in df_combined.columns if col not in [
        'CREAT_DATE', 'ds', 'created_dt', 'cutoff']})
    # Use groupby + transform to store each ds group's latest CREAT_DATE in a new column
    df_combined['max_cutoff'] = df_combined.groupby(
        'ds')['CREAT_DATE'].transform('max')
    # Keep only the rows whose CREAT_DATE equals max_cutoff, i.e. the most
    # recent forecast for each ds, while retaining the other columns
    df_combined = df_combined[df_combined['CREAT_DATE']
                              == df_combined['max_cutoff']]
    # Drop the columns generated by the model run
    df_combined.drop(columns=['CREAT_DATE', 'max_cutoff', 'created_dt', 'min_within_quantile', 'max_within_quantile',
                              'id', 'min_price', 'max_price', 'LOW_PRICE', 'HIGH_PRICE', 'mean', 'cutoff'], inplace=True, errors='ignore')
    # Collect the model names
    modelnames = df_combined.columns.to_list()[1:]
    if 'y' in modelnames:
        modelnames.remove('y')
    if 'cutoff' in modelnames:
        modelnames.remove('cutoff')
    if 'ds' in modelnames:
        modelnames.remove('ds')
    df_combined3 = df_combined.copy()  # back up df_combined; the plots need it later
    # Collect each model's MSE, RMSE and MAE in a list
    cellText = []
    # Compute the evaluation metrics for every model
    for model in modelnames:
        modelmse = mse(df_combined['y'], df_combined[model])
        modelrmse = rmse(df_combined['y'], df_combined[model])
        modelmae = mae(df_combined['y'], df_combined[model])
        # modelmape = mape(df_combined['y'], df_combined[model])
        # modelsmape = smape(df_combined['y'], df_combined[model])
        # modelr2 = r2_score(df_combined['y'], df_combined[model])
        cellText.append([model, round(modelmse, 3), round(
            modelrmse, 3), round(modelmae, 3)])
    model_results3 = pd.DataFrame(
        cellText, columns=['模型(Model)', '平均平方误差(MSE)', '均方根误差(RMSE)', '平均绝对误差(MAE)'])
    # Sort by MSE in ascending order
    model_results3 = model_results3.sort_values(
        by='平均平方误差(MSE)', ascending=True)
    model_results3.to_csv(os.path.join(
        config.dataset, "model_evaluation.csv"), index=False)
    modelnames = model_results3['模型(Model)'].tolist()
    most_model_name = modelnames[0]
    allmodelnames = modelnames.copy()
    # Save the names of the five best models
    if len(modelnames) > 5:
        modelnames = modelnames[0:5]
    if config.is_fivemodels:
        pass
    else:
        with open(os.path.join(config.dataset, "best_modelnames.txt"), 'w') as f:
            f.write(','.join(modelnames) + '\n')
    # Plot predictions against the ground truth
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.figure(figsize=(15, 10))
    for n, model in enumerate(modelnames[:5]):
        plt.subplot(3, 2, n + 1)
        plt.plot(df_combined3['ds'], df_combined3['y'], label='真实值')
        plt.plot(df_combined3['ds'], df_combined3[model], label=model)
        plt.legend()
        plt.xlabel('日期')
        plt.ylabel('价格')
        plt.title(model + '拟合')
    plt.subplots_adjust(hspace=0.5)
    plt.savefig(os.path.join(config.dataset, '预测值与真实值对比图.png'),
                bbox_inches='tight')
    plt.close()
    # Historical data + forecast data
    # Append the future-date forecast
    df_predict = pd.read_csv(os.path.join(config.dataset, 'predict.csv'))
    df_predict.drop('unique_id', inplace=True, axis=1)
    df_predict.dropna(axis=1, inplace=True)
    try:
        df_predict['ds'] = pd.to_datetime(df_predict['ds'], format=r'%Y-%m-%d')
    except ValueError:
        df_predict['ds'] = pd.to_datetime(df_predict['ds'], format=r'%Y/%m/%d')

    def first_row_to_database(df):
        # Store the first row in the database
        first_row = df.head(1)
        first_row['ds'] = first_row['ds'].dt.strftime('%Y-%m-%d 00:00:00')
        # Save the prediction to the database
        if not sqlitedb.check_table_exists('trueandpredict'):
            first_row.to_sql('trueandpredict',
                             sqlitedb.connection, index=False)
        else:
            for col in first_row.columns:
                sqlitedb.add_column_if_not_exists(
                    'trueandpredict', col, 'TEXT')
            for row in first_row.itertuples(index=False):
                row_dict = row._asdict()
                columns = row_dict.keys()
                check_query = sqlitedb.select_data(
                    'trueandpredict', where_condition=f"ds = '{row.ds}'")
                if len(check_query) > 0:
                    set_clause = ", ".join(
                        [f"{key} = '{value}'" for key, value in row_dict.items()])
                    sqlitedb.update_data(
                        'trueandpredict', set_clause, where_condition=f"ds = '{row.ds}'")
                    continue
                sqlitedb.insert_data('trueandpredict', tuple(
                    row_dict.values()), columns=columns)

    first_row_to_database(df_predict)
    df_combined3 = pd.concat([df_combined3, df_predict]).reset_index(drop=True)

    # Compute each model's absolute error ratio against the best model, then
    # use the configured threshold `rote` to pick which predictions feed the
    # displayed max/min values
    names = []
    names_df = df_combined3.copy()
    for col in allmodelnames:
        names_df[f'{col}-{most_model_name}-误差比例'] = abs(
            names_df[col] - names_df[most_model_name]) / names_df[most_model_name]
        names.append(f'{col}-{most_model_name}-误差比例')
    names_df = names_df[names]

    def add_rote_column(row):
        columns = []
        for r in names_df.columns:
            if row[r] <= config.rote:
                columns.append(r.split('-')[0])
        return pd.Series([columns], index=['columns'])

    names_df['columns'] = names_df.apply(add_rote_column, axis=1)

    def add_upper_lower_bound(row):
        print(row['columns'])
        print(type(row['columns']))
        # Upper bound: the largest prediction among the selected models
        upper_bound = df_combined3.loc[row.name, row['columns']].max()
        # Lower bound: the smallest prediction among the selected models
        lower_bound = df_combined3.loc[row.name, row['columns']].min()
        return pd.Series([lower_bound, upper_bound], index=['min_within_quantile', 'max_within_quantile'])

    df_combined3[['min_within_quantile', 'max_within_quantile']
                 ] = names_df.apply(add_upper_lower_bound, axis=1)

    def find_closest_values(row):
        x = row.y
        if x is None or np.isnan(x):
            return pd.Series([None, None], index=['min_price', 'max_price'])
        # row = row.drop('ds')
        row = row.values.tolist()
        row.sort()
        print(row)
        # index of x within the sorted row
        index = row.index(x)
        if index == 0:
            return pd.Series([row[index + 1], row[index + 2]], index=['min_price', 'max_price'])
        elif index == len(row) - 1:
            return pd.Series([row[index - 2], row[index - 1]], index=['min_price', 'max_price'])
        else:
            return pd.Series([row[index - 1], row[index + 1]], index=['min_price', 'max_price'])
2024-12-24 15:10:18 +08:00
def find_most_common_model():
# 最多频率的模型名称
2025-03-05 09:47:02 +08:00
min_model_max_frequency_model = df_combined3['min_model'].tail(
60).value_counts().idxmax()
max_model_max_frequency_model = df_combined3['max_model'].tail(
60).value_counts().idxmax()
2024-12-24 15:10:18 +08:00
if min_model_max_frequency_model == max_model_max_frequency_model:
# 取60天第二多的模型
2025-03-05 09:47:02 +08:00
max_model_max_frequency_model = df_combined3['max_model'].tail(
60).value_counts().nlargest(2).index[1]
2024-12-24 15:10:18 +08:00
df_predict['min_model'] = min_model_max_frequency_model
df_predict['max_model'] = max_model_max_frequency_model
df_predict['min_within_quantile'] = df_predict[min_model_max_frequency_model]
df_predict['max_within_quantile'] = df_predict[max_model_max_frequency_model]
# find_most_common_model()
df_combined3['ds'] = pd.to_datetime(df_combined3['ds'])
df_combined3['ds'] = df_combined3['ds'].dt.strftime('%Y-%m-%d')
df_predict2 = df_combined3.tail(config.horizon)
2024-12-24 15:10:18 +08:00
# 保存到数据库
if not sqlitedb.check_table_exists('accuracy'):
2025-03-05 09:47:02 +08:00
columns = ','.join(df_combined3.columns.to_list(
)+['id', 'CREAT_DATE', 'min_price', 'max_price', 'LOW_PRICE', 'HIGH_PRICE', 'mean'])
sqlitedb.create_table('accuracy', columns=columns)
existing_data = sqlitedb.select_data(table_name="accuracy")
2024-12-24 15:10:18 +08:00
2025-03-05 09:47:02 +08:00
if not existing_data.empty:
2024-12-24 15:10:18 +08:00
max_id = existing_data['id'].astype(int).max()
df_predict2['id'] = range(max_id + 1, max_id + 1 + len(df_predict2))
else:
df_predict2['id'] = range(1, 1 + len(df_predict2))
df_predict2['CREAT_DATE'] = end_time
2025-03-05 09:47:02 +08:00
save_to_database(sqlitedb, df_predict2, "accuracy", end_time)
2024-12-25 16:13:22 +08:00
2024-12-24 15:10:18 +08:00
# 上周准确率计算
2025-03-05 09:47:02 +08:00
accuracy_df = sqlitedb.select_data(table_name="accuracy")
2024-12-27 14:15:20 +08:00
predict_y = accuracy_df.copy()
2024-12-24 15:10:18 +08:00
# ids = predict_y[predict_y['min_price'].isnull()]['id'].tolist()
ids = predict_y['id'].tolist()
# 准确率基准与绘图上下界逻辑一致
# predict_y[['min_price','max_price']] = predict_y[['min_within_quantile','max_within_quantile']]
2025-03-05 09:47:02 +08:00
# 模型评估前五均值
2024-12-24 16:57:03 +08:00
# predict_y['min_price'] = predict_y[modelnames].mean(axis=1) -1
# predict_y['max_price'] = predict_y[modelnames].mean(axis=1) +1
2025-03-05 09:47:02 +08:00
# 模型评估前十均值
predict_y['min_price'] = predict_y[allmodelnames[0:10]].mean(axis=1) - 1.5
2024-12-27 14:15:20 +08:00
predict_y['mean'] = predict_y[allmodelnames[0:10]].mean(axis=1)
2025-03-05 09:47:02 +08:00
predict_y['max_price'] = predict_y[allmodelnames[0:10]].mean(axis=1) + 1.5
2024-12-24 15:10:18 +08:00
# 模型评估前十最大最小
# allmodelnames 和 predict_y 列 重复的
# allmodelnames = [col for col in allmodelnames if col in predict_y.columns]
2025-03-05 09:47:02 +08:00
# predict_y['min_price'] = predict_y[allmodelnames[0:10]].min(axis=1)
2024-12-24 15:10:18 +08:00
# predict_y['max_price'] = predict_y[allmodelnames[0:10]].max(axis=1)
for id in ids:
row = predict_y[predict_y['id'] == id]
try:
2025-03-05 09:47:02 +08:00
sqlitedb.update_data(
'accuracy', f"min_price = {row['min_price'].values[0]},max_price = {row['max_price'].values[0]},mean={row['mean'].values[0]}", f"id = {id}")
2024-12-24 15:10:18 +08:00
except:
config.loggererror(
f'更新accuracy表中的min_price,max_price,mean值失败row={row}')
    df = accuracy_df.copy()
    df['ds'] = pd.to_datetime(df['ds'])
    df = df.reindex()

    # Accuracy is 1 when the prediction falls inside the Brent high/low range, else 0
    def is_within_range(row):
        for model in allmodelnames:
            if row['LOW_PRICE'] <= row[model] <= row['HIGH_PRICE']:
                return 1
            else:
                return 0

    # Compute the accuracy of a single row
    def calculate_accuracy(row):
        # Compare the true high/low range with the predicted high/low range
        # Full containment (either interval contains the other):
        if (row['max_price'] >= row['HIGH_PRICE'] and row['min_price'] <= row['LOW_PRICE']) or \
           (row['max_price'] <= row['HIGH_PRICE'] and row['min_price'] >= row['LOW_PRICE']):
            return 1
        # No overlap:
        if row['max_price'] < row['LOW_PRICE'] or \
           row['min_price'] > row['HIGH_PRICE']:
            return 0
        # Partial overlap:
        else:
            sorted_prices = sorted(
                [row['LOW_PRICE'], row['min_price'], row['max_price'], row['HIGH_PRICE']])
            middle_diff = sorted_prices[2] - sorted_prices[1]
            price_range = row['HIGH_PRICE'] - row['LOW_PRICE']
            accuracy = middle_diff / price_range
            return accuracy

    columns = ['HIGH_PRICE', 'LOW_PRICE', 'min_price', 'max_price']
    df[columns] = df[columns].astype(float)
    df['ACCURACY'] = df.apply(calculate_accuracy, axis=1)
    # df['ACCURACY'] = df.apply(is_within_range, axis=1)
    # Compute the weekly accuracy rate and persist it
    def _get_accuracy_rate(df, create_dates, ds_dates):
        df3 = df.copy()
        df3 = df3[df3['CREAT_DATE'].isin(create_dates)]
        df3 = df3[df3['ds'].isin(ds_dates)]
        accuracy_rote = 0
        for i, group in df3.groupby('CREAT_DATE'):
            accuracy_rote += (group['ACCURACY'].sum() /
                              len(group)) * weight_dict[len(group) - 1]
        accuracy_rote = round(accuracy_rote, 2)
        df4 = pd.DataFrame(columns=['开始日期', '结束日期', '准确率'])
        df4.loc[len(df4)] = {'开始日期': ds_dates[0],
                             '结束日期': ds_dates[-1], '准确率': accuracy_rote}
        df4.to_sql("accuracy_rote", con=sqlitedb.connection,
                   if_exists='append', index=False)

    create_dates, ds_dates = get_week_date(end_time)
    _get_accuracy_rate(df, create_dates, ds_dates)

    def _add_abs_error_rate():
        # Deviation ratio of each prediction from the true value
        for model in allmodelnames:
            df_combined3[f'{model}_abs_error_rate'] = abs(
                df_combined3['y'] - df_combined3[model]) / df_combined3['y']
        # Smallest deviation ratio in each row
        min_abs_error_rate_values = df_combined3.apply(
            lambda row: row[[f'{model}_abs_error_rate' for model in allmodelnames]].min(), axis=1)
        # Column name holding each row's smallest deviation ratio
        min_abs_error_rate_column_name = df_combined3.apply(
            lambda row: row[[f'{model}_abs_error_rate' for model in allmodelnames]].idxmin(), axis=1)
        # Strip the suffix to recover the model name
        min_abs_error_rate_column_name = min_abs_error_rate_column_name.map(
            lambda x: x.split('_')[0])
        # Prediction of the model with the smallest deviation ratio
        min_abs_error_rate_predictions = df_combined3.apply(
            lambda row: row[min_abs_error_rate_column_name[row.name]], axis=1)
        # Add that prediction to the DataFrame
        df_combined3['min_abs_error_rate_prediction'] = min_abs_error_rate_predictions
        df_combined3['min_abs_error_rate_column_name'] = min_abs_error_rate_column_name

    # _add_abs_error_rate()

    # Cast the numeric columns of df to float
    for col in df_combined3.columns:
        try:
            if col != 'ds':
                df_combined3[col] = df_combined3[col].astype(float)
                df_combined3[col] = df_combined3[col].round(2)
        except ValueError:
            pass
    df_combined3.to_csv(os.path.join(
        config.dataset, "testandpredict_groupby.csv"), index=False)

    # Historical prices + predicted prices
    sqlitedb.drop_table('testandpredict_groupby')
    df_combined3.to_sql('testandpredict_groupby',
                        sqlitedb.connection, index=False)
    def _plt_predict_ture(df):
        # Plot the last 90 points once the history exceeds 180 rows
        lens = df.shape[0] if df.shape[0] < 180 else 90
        df = df[-lens:]
        # Historical prices
        plt.figure(figsize=(20, 10))
        # Normalize the date format
        df['ds'] = pd.to_datetime(df['ds'])
        plt.plot(df['ds'], df['y'], label='真实值')
        # Shade the band between the quantile bounds
        plt.fill_between(df['ds'], df['max_within_quantile'],
                         df['min_within_quantile'], alpha=0.2)
        # markers = ['o', 's', '^', 'D', 'v', '*', 'p', 'h', 'H', '+', 'x', 'd']
        # random_marker = random.choice(markers)
        # for model in allmodelnames:
        # for model in ['BiTCN','RNN']:
        #     plt.plot(df['ds'], df[model], label=model, marker=random_marker)
        # plt.plot(df_combined3['ds'], df_combined3['min_abs_error_rate_prediction'], label='最小绝对误差', linestyle='--', color='orange')
        # Grid
        plt.grid(True)
        # Annotate the historical values
        for i, j in zip(df['ds'], df['y']):
            plt.text(i, j, str(j), ha='center', va='bottom')
        # for model in most_model:
        #     plt.plot(df['ds'], df[model], label=model, marker='o')
        # fixed: the label previously reused the leaked loop variable `model`
        plt.plot(df['ds'], df[most_model_name], label=most_model_name, marker='o')
        # Dashed vertical line at the current date
        plt.axvline(x=df['ds'].iloc[-horizon], color='r', linestyle='--')
        plt.legend()
        plt.xlabel('日期')
        # Format the x axis as year-month-day
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        # Let matplotlib pick the tick locations
        plt.gca().xaxis.set_major_locator(mdates.AutoDateLocator())
        plt.xticks(rotation=45)  # rotate the date labels 45 degrees to avoid overlap
        plt.ylabel('价格')
        plt.savefig(os.path.join(config.dataset, '历史价格-预测值.png'),
                    bbox_inches='tight')
        plt.close()
    def _plt_modeltopten_predict_ture(df):
        df['ds'] = pd.to_datetime(df['ds'])
        df['max_cutoff'] = df.groupby('ds')['CREAT_DATE'].transform('max')
        df = df[df['CREAT_DATE'] == df['max_cutoff']]
        df['mean'] = df['mean'].astype(float)
        # Plot at most the last 180 points
        lens = df.shape[0] if df.shape[0] < 180 else 180
        df = df[-lens:]
        plt.figure(figsize=(20, 10))
        plt.plot(df['ds'], df['y'], label='真实值')
        plt.plot(df['ds'], df['mean'], label='模型前十均值',
                 linestyle='--', color='orange')
        # Shade the top-ten price band
        plt.fill_between(df['ds'], df['max_price'], df['min_price'], alpha=0.2)
        plt.grid(True)
        # Annotate the observed values
        for i, j in zip(df['ds'], df['y']):
            plt.text(i, j, str(j), ha='center', va='bottom')
        # Vertical dashed line at the forecast start
        plt.axvline(x=df['ds'].iloc[-horizon], color='r', linestyle='--')
        plt.legend()
        plt.xlabel('日期')
        plt.gca().xaxis.set_major_locator(mdates.AutoDateLocator())
        plt.xticks(rotation=45)  # rotate date labels to avoid overlap
        plt.ylabel('价格')
        plt.savefig(os.path.join(config.dataset, '历史价格-预测值1.png'),
                    bbox_inches='tight')
        plt.close()
    def _plt_predict_table(df):
        # Render the forecast table as an image
        fig, ax = plt.subplots(figsize=(20, 6))
        ax.axis('off')  # hide the axes
        df = df.round(2)  # keep 2 decimal places
        df = df[-horizon:]
        df['Day'] = [f'Day_{i}' for i in range(1, horizon+1)]
        # Move the Day column to the front
        df = df[['Day'] + list(df.columns[:-1])]
        table = ax.table(cellText=df.values,
                         colLabels=df.columns, loc='center')
        table.auto_set_font_size(False)
        table.set_fontsize(10)
        # TODO: highlight each column's minimum in green
        plt.savefig(os.path.join(config.dataset, '预测值表格.png'),
                    bbox_inches='tight')
        plt.close()
    def _plt_model_results3():
        # Render the evaluation table as an image
        plt.rcParams['font.sans-serif'] = ['SimHei']
        fig, ax = plt.subplots(figsize=(20, 10))
        ax.axis('off')  # hide the axes
        table = ax.table(cellText=model_results3.values,
                         colLabels=model_results3.columns, loc='center')
        table.auto_set_font_size(False)
        table.set_fontsize(10)
        # TODO: highlight each column's minimum in green
        plt.savefig(os.path.join(config.dataset, '模型评估.png'),
                    bbox_inches='tight')
        plt.close()

    # _plt_predict_ture(df_combined3)
    # _plt_modeltopten_predict_ture(df_combined4)
    # _plt_predict_table(df_combined3)
    # _plt_model_results3()
    return model_results3


# Polyolefin: compute the forecast evaluation index (legacy version)
@exception_logger
def model_losss_juxitingbak(sqlitedb, end_time):
    global dataset
    global rote
    most_model = [sqlitedb.select_data('most_model', columns=[
        'most_common_model'], order_by='ds desc', limit=1).values[0][0]]
    most_model_name = most_model[0]
    # Prediction post-processing
    df_combined = loadcsv(os.path.join(config.dataset, "cross_validation.csv"))
    df_combined.drop(columns=['cutoff'], inplace=True)
    df_combined['CREAT_DATE'] = end_time
    df_combined = dateConvert(df_combined)
    # df_combined = sqlitedb.select_data('accuracy', where_condition=f"created_dt <= '{end_time}'")
    df_combined4 = df_combined.copy()  # backup of df_combined for plotting later
    # Drop columns that are more than 80% missing
    config.logger.info(df_combined.shape)
    df_combined = df_combined.loc[:, df_combined.isnull().mean() < 0.8]
    config.logger.info(df_combined.shape)
    # Drop the remaining rows with missing values
    df_combined.dropna(inplace=True)
    config.logger.info(df_combined.shape)
    # Cast the other columns to numeric
    df_combined = df_combined.astype({col: 'float32' for col in df_combined.columns if col not in [
        'CREAT_DATE', 'ds', 'created_dt']})
    # For each ds keep the row with the latest CREAT_DATE
    df_combined['max_cutoff'] = df_combined.groupby(
        'ds')['CREAT_DATE'].transform('max')
    df_combined = df_combined[df_combined['CREAT_DATE']
                              == df_combined['max_cutoff']]
    # Drop the helper columns produced along the way
    df_combined.drop(columns=['CREAT_DATE', 'max_cutoff', 'created_dt', 'min_within_quantile',
                              'max_within_quantile', 'id', 'min_price', 'max_price', 'LOW_PRICE', 'HIGH_PRICE', 'mean'], inplace=True)
    # Model names (skip the leading ds column)
    modelnames = df_combined.columns.to_list()[1:]
    if 'y' in modelnames:
        modelnames.remove('y')
    df_combined3 = df_combined.copy()  # backup of df_combined for plotting later
    # Collect MSE, RMSE and MAE for every model
    cellText = []
    # Compute the evaluation metrics per model
    for model in modelnames:
        modelmse = mse(df_combined['y'], df_combined[model])
        modelrmse = rmse(df_combined['y'], df_combined[model])
        modelmae = mae(df_combined['y'], df_combined[model])
        # modelmape = mape(df_combined['y'], df_combined[model])
        # modelsmape = smape(df_combined['y'], df_combined[model])
        # modelr2 = r2_score(df_combined['y'], df_combined[model])
        cellText.append([model, round(modelmse, 3), round(
            modelrmse, 3), round(modelmae, 3)])
    model_results3 = pd.DataFrame(
        cellText, columns=['模型(Model)', '平均平方误差(MSE)', '均方根误差(RMSE)', '平均绝对误差(MAE)'])
    # Sort by MSE, ascending
    model_results3 = model_results3.sort_values(
        by='平均平方误差(MSE)', ascending=True)
    model_results3.to_csv(os.path.join(
        config.dataset, "model_evaluation.csv"), index=False)
    modelnames = model_results3['模型(Model)'].tolist()
    allmodelnames = modelnames.copy()
    # Keep the names of the five best models
    if len(modelnames) > 5:
        modelnames = modelnames[0:5]
    if is_fivemodels:
        pass
    else:
        with open(os.path.join(config.dataset, "best_modelnames.txt"), 'w') as f:
            f.write(','.join(modelnames) + '\n')
    # Fitted vs. actual values for the five best models
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.figure(figsize=(15, 10))
    for n, model in enumerate(modelnames[:5]):
        plt.subplot(3, 2, n+1)
        plt.plot(df_combined3['ds'], df_combined3['y'], label='真实值')
        plt.plot(df_combined3['ds'], df_combined3[model], label=model)
        plt.legend()
        plt.xlabel('日期')
        plt.ylabel('价格')
        plt.title(model+'拟合')
        plt.subplots_adjust(hspace=0.5)
    plt.savefig(os.path.join(config.dataset, '预测值与真实值对比图.png'),
                bbox_inches='tight')
    plt.close()

    # Append the future forecast to the history
    df_predict = pd.read_csv(os.path.join(config.dataset, 'predict.csv'))
    df_predict.drop('unique_id', inplace=True, axis=1)
    df_predict.dropna(axis=1, inplace=True)
    try:
        df_predict['ds'] = pd.to_datetime(df_predict['ds'], format=r'%Y-%m-%d')
    except ValueError:
        df_predict['ds'] = pd.to_datetime(df_predict['ds'], format=r'%Y/%m/%d')
    def first_row_to_database(df):
        # Persist the first forecast row to the database
        first_row = df.head(1)
        first_row['ds'] = first_row['ds'].dt.strftime('%Y-%m-%d 00:00:00')
        if not sqlitedb.check_table_exists('trueandpredict'):
            first_row.to_sql('trueandpredict',
                             sqlitedb.connection, index=False)
        else:
            for col in first_row.columns:
                sqlitedb.add_column_if_not_exists(
                    'trueandpredict', col, 'TEXT')
            for row in first_row.itertuples(index=False):
                row_dict = row._asdict()
                columns = row_dict.keys()
                check_query = sqlitedb.select_data(
                    'trueandpredict', where_condition=f"ds = '{row.ds}'")
                if len(check_query) > 0:
                    set_clause = ", ".join(
                        [f"{key} = '{value}'" for key, value in row_dict.items()])
                    sqlitedb.update_data(
                        'trueandpredict', set_clause, where_condition=f"ds = '{row.ds}'")
                    continue
                sqlitedb.insert_data('trueandpredict', tuple(
                    row_dict.values()), columns=columns)

    first_row_to_database(df_predict)
    df_combined3 = pd.concat([df_combined3, df_predict]).reset_index(drop=True)
    # For every model, compute its deviation ratio against the benchmark model,
    # then use the threshold `rote` to pick which predictions bound the band
    names = []
    names_df = df_combined3.copy()
    for col in allmodelnames:
        names_df[f'{col}-{most_model_name}-误差比例'] = abs(
            names_df[col] - names_df[most_model_name]) / names_df[most_model_name]
        names.append(f'{col}-{most_model_name}-误差比例')
    names_df = names_df[names]

    def add_rote_column(row):
        columns = []
        for r in names_df.columns:
            if row[r] <= rote:
                columns.append(r.split('-')[0])
        return pd.Series([columns], index=['columns'])
    names_df['columns'] = names_df.apply(add_rote_column, axis=1)

    def add_upper_lower_bound(row):
        # Upper and lower bound across the models within the threshold
        upper_bound = df_combined3.loc[row.name, row['columns']].max()
        lower_bound = df_combined3.loc[row.name, row['columns']].min()
        return pd.Series([lower_bound, upper_bound], index=['min_within_quantile', 'max_within_quantile'])
    df_combined3[['min_within_quantile', 'max_within_quantile']
                 ] = names_df.apply(add_upper_lower_bound, axis=1)
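    # Example (sketch): if on one row only NHITS and LSTM deviate from the
    # benchmark model by no more than `rote`, that row's band becomes
    # [min(NHITS, LSTM), max(NHITS, LSTM)].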
2025-02-11 16:31:52 +08:00
def find_closest_values(row):
x = row.y
if x is None or np.isnan(x):
2025-03-05 09:47:02 +08:00
return pd.Series([None, None], index=['min_price', 'max_price'])
2025-02-11 16:31:52 +08:00
# row = row.drop('ds')
row = row.values.tolist()
row.sort()
print(row)
# x 在row中的索引
index = row.index(x)
if index == 0:
2025-03-05 09:47:02 +08:00
return pd.Series([row[index+1], row[index+2]], index=['min_price', 'max_price'])
2025-02-11 16:31:52 +08:00
elif index == len(row)-1:
2025-03-05 09:47:02 +08:00
return pd.Series([row[index-2], row[index-1]], index=['min_price', 'max_price'])
2025-02-11 16:31:52 +08:00
else:
2025-03-05 09:47:02 +08:00
return pd.Series([row[index-1], row[index+1]], index=['min_price', 'max_price'])
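
    # Example (sketch): for a sorted row [74, 75, 76, 80] with y=76 the band is
    # [75, 80]; when y is the smallest or largest value, the two nearest values
    # on that side are used instead.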
    def find_most_common_model():
        # Most frequent min/max models over the last 60 days
        min_model_max_frequency_model = df_combined3['min_model'].tail(
            60).value_counts().idxmax()
        max_model_max_frequency_model = df_combined3['max_model'].tail(
            60).value_counts().idxmax()
        if min_model_max_frequency_model == max_model_max_frequency_model:
            # Fall back to the second most frequent model of the last 60 days
            max_model_max_frequency_model = df_combined3['max_model'].tail(
                60).value_counts().nlargest(2).index[1]
        df_predict['min_model'] = min_model_max_frequency_model
        df_predict['max_model'] = max_model_max_frequency_model
        df_predict['min_within_quantile'] = df_predict[min_model_max_frequency_model]
        df_predict['max_within_quantile'] = df_predict[max_model_max_frequency_model]
    # find_most_common_model()
    df_combined3['ds'] = pd.to_datetime(df_combined3['ds'])
    df_combined3['ds'] = df_combined3['ds'].dt.strftime('%Y-%m-%d')
    df_predict2 = df_combined3.tail(horizon)
    # Persist to the accuracy table
    if not sqlitedb.check_table_exists('accuracy'):
        columns = ','.join(df_combined3.columns.to_list(
        )+['id', 'CREAT_DATE', 'min_price', 'max_price', 'LOW_PRICE', 'HIGH_PRICE', 'mean'])
        sqlitedb.create_table('accuracy', columns=columns)
    existing_data = sqlitedb.select_data(table_name="accuracy")
    if not existing_data.empty:
        max_id = existing_data['id'].astype(int).max()
        df_predict2['id'] = range(max_id + 1, max_id + 1 + len(df_predict2))
    else:
        df_predict2['id'] = range(1, 1 + len(df_predict2))
    df_predict2['CREAT_DATE'] = end_time
    save_to_database(sqlitedb, df_predict2, "accuracy", end_time)
    # Accuracy for the previous week
    accuracy_df = sqlitedb.select_data(table_name="accuracy")
    predict_y = accuracy_df.copy()
    # ids = predict_y[predict_y['min_price'].isnull()]['id'].tolist()
    ids = predict_y['id'].tolist()
    # The accuracy baseline matches the plotting band logic
    # predict_y[['min_price','max_price']] = predict_y[['min_within_quantile','max_within_quantile']]
    # Band from the five best models' mean:
    # predict_y['min_price'] = predict_y[modelnames].mean(axis=1) - 1
    # predict_y['max_price'] = predict_y[modelnames].mean(axis=1) + 1
    # Band from the ten best models' mean:
    predict_y['min_price'] = predict_y[allmodelnames[0:10]].mean(axis=1) - 1.5
    predict_y['mean'] = predict_y[allmodelnames[0:10]].mean(axis=1)
    predict_y['max_price'] = predict_y[allmodelnames[0:10]].mean(axis=1) + 1.5
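    # Sketch: the band is the ten best models' mean prediction plus or minus a
    # fixed 1.5 (an absolute offset in price units); the offset trades coverage
    # against band width.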
    # Alternative: min/max across the ten best models
    # allmodelnames = [col for col in allmodelnames if col in predict_y.columns]
    # predict_y['min_price'] = predict_y[allmodelnames[0:10]].min(axis=1)
    # predict_y['max_price'] = predict_y[allmodelnames[0:10]].max(axis=1)
    for id in ids:
        row = predict_y[predict_y['id'] == id]
        try:
            sqlitedb.update_data(
                'accuracy', f"min_price = {row['min_price'].values[0]},max_price = {row['max_price'].values[0]},mean={row['mean'].values[0]}", f"id = {id}")
        except Exception:
            config.logger.error(
                f'更新accuracy表中的min_price,max_price,mean值失败row={row}')

    df = accuracy_df.copy()
    df['ds'] = pd.to_datetime(df['ds'])
    df = df.reset_index(drop=True)
    # 1 if any model's prediction falls inside the Brent low/high range, else 0
    def is_within_range(row):
        for model in allmodelnames:
            if row['LOW_PRICE'] <= row[model] <= row['HIGH_PRICE']:
                return 1
        return 0

    # Accuracy as the overlap between the predicted and the true price range
    def calculate_accuracy(row):
        # Full containment (either range inside the other):
        if (row['max_price'] >= row['HIGH_PRICE'] and row['min_price'] <= row['LOW_PRICE']) or \
                (row['max_price'] <= row['HIGH_PRICE'] and row['min_price'] >= row['LOW_PRICE']):
            return 1
        # No overlap:
        if row['max_price'] < row['LOW_PRICE'] or \
                row['min_price'] > row['HIGH_PRICE']:
            return 0
        # Partial overlap:
        else:
            sorted_prices = sorted(
                [row['LOW_PRICE'], row['min_price'], row['max_price'], row['HIGH_PRICE']])
            middle_diff = sorted_prices[2] - sorted_prices[1]
            price_range = row['HIGH_PRICE'] - row['LOW_PRICE']
            accuracy = middle_diff / price_range
            return accuracy
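
    # Worked example (sketch): with LOW_PRICE=70, HIGH_PRICE=80 and a predicted
    # band of [75, 85], sorted_prices is [70, 75, 80, 85]; the overlap
    # 80 - 75 = 5 over the true range 80 - 70 = 10 gives an accuracy of 0.5.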
    columns = ['HIGH_PRICE', 'LOW_PRICE', 'min_price', 'max_price']
    df[columns] = df[columns].astype(float)
    df['ACCURACY'] = df.apply(calculate_accuracy, axis=1)
    # df['ACCURACY'] = df.apply(is_within_range, axis=1)

    # Weekly accuracy rate, weighted per CREAT_DATE group
    def _get_accuracy_rate(df, create_dates, ds_dates):
        df3 = df.copy()
        df3 = df3[df3['CREAT_DATE'].isin(create_dates)]
        df3 = df3[df3['ds'].isin(ds_dates)]
        accuracy_rote = 0
        for i, group in df3.groupby('CREAT_DATE'):
            accuracy_rote += (group['ACCURACY'].sum() /
                              len(group)) * weight_dict[len(group)-1]
        accuracy_rote = round(accuracy_rote, 2)
        df4 = pd.DataFrame(columns=['开始日期', '结束日期', '准确率'])
        df4.loc[len(df4)] = {'开始日期': ds_dates[0],
                             '结束日期': ds_dates[-1], '准确率': accuracy_rote}
        df4.to_sql("accuracy_rote", con=sqlitedb.connection,
                   if_exists='append', index=False)

    create_dates, ds_dates = get_week_date(end_time)
    _get_accuracy_rate(df, create_dates, ds_dates)
    def _add_abs_error_rate():
        # Absolute error rate of every model against the ground truth
        for model in allmodelnames:
            df_combined3[f'{model}_abs_error_rate'] = abs(
                df_combined3['y'] - df_combined3[model]) / df_combined3['y']
        # Smallest error rate in each row and the model that produced it
        min_abs_error_rate_values = df_combined3.apply(
            lambda row: row[[f'{model}_abs_error_rate' for model in allmodelnames]].min(), axis=1)
        min_abs_error_rate_column_name = df_combined3.apply(
            lambda row: row[[f'{model}_abs_error_rate' for model in allmodelnames]].idxmin(), axis=1)
        # Strip the suffix to recover the model name
        min_abs_error_rate_column_name = min_abs_error_rate_column_name.map(
            lambda x: x.split('_')[0])
        min_abs_error_rate_predictions = df_combined3.apply(
            lambda row: row[min_abs_error_rate_column_name[row.name]], axis=1)
        # Attach both to the DataFrame
        df_combined3['min_abs_error_rate_prediction'] = min_abs_error_rate_predictions
        df_combined3['min_abs_error_rate_column_name'] = min_abs_error_rate_column_name
    # _add_abs_error_rate()

    # Cast numeric columns to float and round to 2 dp (skip 'ds')
    for col in df_combined3.columns:
        try:
            if col != 'ds':
                df_combined3[col] = df_combined3[col].astype(float)
                df_combined3[col] = df_combined3[col].round(2)
        except ValueError:
            pass
    df_combined3.to_csv(os.path.join(
        config.dataset, "testandpredict_groupby.csv"), index=False)

    # History plus forecast prices
    sqlitedb.drop_table('testandpredict_groupby')
    df_combined3.to_sql('testandpredict_groupby',
                        sqlitedb.connection, index=False)
    def _plt_predict_ture(df):
        # Plot at most the last 90 points of history plus forecast
        lens = df.shape[0] if df.shape[0] < 180 else 90
        df = df[-lens:]
        plt.figure(figsize=(20, 10))
        # Normalize the time axis
        df['ds'] = pd.to_datetime(df['ds'])
        plt.plot(df['ds'], df['y'], label='真实值')
        # Shade the confidence band
        plt.fill_between(df['ds'], df['max_within_quantile'],
                         df['min_within_quantile'], alpha=0.2)
        plt.grid(True)
        # Annotate the observed values
        for i, j in zip(df['ds'], df['y']):
            plt.text(i, j, str(j), ha='center', va='bottom')
        for model in most_model:
            plt.plot(df['ds'], df[model], label=model, marker='o')
        # Vertical dashed line at the forecast start
        plt.axvline(x=df['ds'].iloc[-horizon], color='r', linestyle='--')
        plt.legend()
        plt.xlabel('日期')
        # Format the x axis as YYYY-MM-DD with automatic tick placement
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        plt.gca().xaxis.set_major_locator(mdates.AutoDateLocator())
        plt.xticks(rotation=45)  # rotate date labels to avoid overlap
        plt.ylabel('价格')
        plt.savefig(os.path.join(config.dataset, '历史价格-预测值.png'),
                    bbox_inches='tight')
        plt.close()
    def _plt_modeltopten_predict_ture(df):
        df['ds'] = pd.to_datetime(df['ds'])
        df['max_cutoff'] = df.groupby('ds')['CREAT_DATE'].transform('max')
        df = df[df['CREAT_DATE'] == df['max_cutoff']]
        df['mean'] = df['mean'].astype(float)
        # Plot at most the last 180 points
        lens = df.shape[0] if df.shape[0] < 180 else 180
        df = df[-lens:]
        plt.figure(figsize=(20, 10))
        plt.plot(df['ds'], df['y'], label='真实值')
        plt.plot(df['ds'], df['mean'], label='模型前十均值',
                 linestyle='--', color='orange')
        # Shade the top-ten price band
        plt.fill_between(df['ds'], df['max_price'], df['min_price'], alpha=0.2)
        plt.grid(True)
        # Annotate the observed values
        for i, j in zip(df['ds'], df['y']):
            plt.text(i, j, str(j), ha='center', va='bottom')
        # Vertical dashed line at the forecast start
        plt.axvline(x=df['ds'].iloc[-horizon], color='r', linestyle='--')
        plt.legend()
        plt.xlabel('日期')
        plt.gca().xaxis.set_major_locator(mdates.AutoDateLocator())
        plt.xticks(rotation=45)  # rotate date labels to avoid overlap
        plt.ylabel('价格')
        plt.savefig(os.path.join(config.dataset, '历史价格-预测值1.png'),
                    bbox_inches='tight')
        plt.close()
    def _plt_predict_table(df):
        # Render the forecast table as an image
        fig, ax = plt.subplots(figsize=(20, 6))
        ax.axis('off')  # hide the axes
        df = df.round(2)  # keep 2 decimal places
        df = df[-horizon:]
        df['Day'] = [f'Day_{i}' for i in range(1, horizon+1)]
        # Move the Day column to the front
        df = df[['Day'] + list(df.columns[:-1])]
        table = ax.table(cellText=df.values,
                         colLabels=df.columns, loc='center')
        table.auto_set_font_size(False)
        table.set_fontsize(10)
        # TODO: highlight each column's minimum in green
        plt.savefig(os.path.join(config.dataset, '预测值表格.png'),
                    bbox_inches='tight')
        plt.close()

    def _plt_model_results3():
        # Render the evaluation table as an image
        plt.rcParams['font.sans-serif'] = ['SimHei']
        fig, ax = plt.subplots(figsize=(20, 10))
        ax.axis('off')  # hide the axes
        table = ax.table(cellText=model_results3.values,
                         colLabels=model_results3.columns, loc='center')
        table.auto_set_font_size(False)
        table.set_fontsize(10)
        # TODO: highlight each column's minimum in green
        plt.savefig(os.path.join(config.dataset, '模型评估.png'),
                    bbox_inches='tight')
        plt.close()

    _plt_predict_ture(df_combined3)
    _plt_modeltopten_predict_ture(df_combined4)
    _plt_predict_table(df_combined3)
    _plt_model_results3()
    return model_results3
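

# Usage (sketch; `SQLiteHandler` is a hypothetical name for the sqlite wrapper
# this module receives -- substitute the project's actual class):
#     sqlitedb = SQLiteHandler('jbsh_juxiting.db')
#     results = model_losss_juxitingbak(sqlitedb, end_time='2024-07-30')
#     print(results.head())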


# Polyolefin: compute the forecast evaluation index
@exception_logger
def model_losss_juxiting(sqlitedb, end_time, is_fivemodels):
    global dataset
    global rote
    most_model = [sqlitedb.select_data('most_model', columns=[
        'most_common_model'], order_by='ds desc', limit=1).values[0][0]]
    most_model_name = most_model[0]
    # Prediction post-processing
    df_combined = loadcsv(os.path.join(config.dataset, "cross_validation.csv"))
    df_combined = dateConvert(df_combined)
    # Drop empty columns, then rows with missing values (the forecast
    # pipeline cannot handle NaNs)
    df_combined.dropna(axis=1, inplace=True)
    df_combined.dropna(inplace=True)
    # Cast the other columns to numeric
    df_combined = df_combined.astype(
        {col: 'float32' for col in df_combined.columns if col not in ['cutoff', 'ds']})
    # For each ds keep the row with the latest cutoff
    df_combined['max_cutoff'] = df_combined.groupby(
        'ds')['cutoff'].transform('max')
    df_combined = df_combined[df_combined['cutoff']
                              == df_combined['max_cutoff']]
    # Drop the helper cutoff columns
    df_combined.drop(columns=['cutoff', 'max_cutoff'], inplace=True)
    # Model names (skip ds and y)
    modelnames = df_combined.columns.to_list()[1:]
    if 'y' in modelnames:
        modelnames.remove('y')
    if 'ds' in modelnames:
        modelnames.remove('ds')
    df_combined3 = df_combined.copy()  # backup of df_combined for plotting later
    # Collect MSE, RMSE and MAE for every model
    cellText = []
    # Compute the evaluation metrics per model
    for model in modelnames:
        modelmse = mse(df_combined['y'], df_combined[model])
        modelrmse = rmse(df_combined['y'], df_combined[model])
        modelmae = mae(df_combined['y'], df_combined[model])
        # modelmape = mape(df_combined['y'], df_combined[model])
        # modelsmape = smape(df_combined['y'], df_combined[model])
        # modelr2 = r2_score(df_combined['y'], df_combined[model])
        cellText.append([model, round(modelmse, 3), round(
            modelrmse, 3), round(modelmae, 3)])
    model_results3 = pd.DataFrame(
        cellText, columns=['模型(Model)', '平均平方误差(MSE)', '均方根误差(RMSE)', '平均绝对误差(MAE)'])
    # Sort by MSE, ascending
    model_results3 = model_results3.sort_values(
        by='平均平方误差(MSE)', ascending=True)
    model_results3.to_csv(os.path.join(
        config.dataset, "model_evaluation.csv"), index=False)
    modelnames = model_results3['模型(Model)'].tolist()
    allmodelnames = modelnames.copy()
    # Keep the names of the five best models
    if len(modelnames) > 5:
        modelnames = modelnames[0:5]
    if is_fivemodels:
        pass
    else:
        with open(os.path.join(config.dataset, "best_modelnames.txt"), 'w') as f:
            f.write(','.join(modelnames) + '\n')
    # Fitted vs. actual values for the five best models
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.figure(figsize=(15, 10))
    for n, model in enumerate(modelnames[:5]):
        plt.subplot(3, 2, n+1)
        plt.plot(df_combined3['ds'], df_combined3['y'], label='真实值')
        plt.plot(df_combined3['ds'], df_combined3[model], label=model)
        plt.legend()
        plt.xlabel('日期')
        plt.ylabel('价格')
        plt.title(model+'拟合')
        plt.subplots_adjust(hspace=0.5)
    plt.savefig(os.path.join(config.dataset, '预测值与真实值对比图.png'),
                bbox_inches='tight')
    plt.close()

    # Append the future forecast to the history
    df_predict = pd.read_csv(os.path.join(config.dataset, 'predict.csv'))
    df_predict.drop('unique_id', inplace=True, axis=1)
    df_predict.dropna(axis=1, inplace=True)
    try:
        df_predict['ds'] = pd.to_datetime(df_predict['ds'], format=r'%Y-%m-%d')
    except ValueError:
        df_predict['ds'] = pd.to_datetime(df_predict['ds'], format=r'%Y/%m/%d')
    def first_row_to_database(df):
        # Persist the first forecast row to the database
        first_row = df.head(1)
        first_row['ds'] = first_row['ds'].dt.strftime('%Y-%m-%d 00:00:00')
        if not sqlitedb.check_table_exists('trueandpredict'):
            first_row.to_sql('trueandpredict',
                             sqlitedb.connection, index=False)
        else:
            for col in first_row.columns:
                sqlitedb.add_column_if_not_exists(
                    'trueandpredict', col, 'TEXT')
            for row in first_row.itertuples(index=False):
                row_dict = row._asdict()
                columns = row_dict.keys()
                check_query = sqlitedb.select_data(
                    'trueandpredict', where_condition=f"ds = '{row.ds}'")
                if len(check_query) > 0:
                    set_clause = ", ".join(
                        [f"{key} = '{value}'" for key, value in row_dict.items()])
                    sqlitedb.update_data(
                        'trueandpredict', set_clause, where_condition=f"ds = '{row.ds}'")
                    continue
                sqlitedb.insert_data('trueandpredict', tuple(
                    row_dict.values()), columns=columns)

    first_row_to_database(df_predict)
    df_combined3 = pd.concat([df_combined3, df_predict]).reset_index(drop=True)
    # For every model, compute its deviation ratio against the benchmark model,
    # then use config.rote to pick which predictions bound the band
    names = []
    names_df = df_combined3.copy()
    for col in allmodelnames:
        names_df[f'{col}-{most_model_name}-误差比例'] = abs(
            names_df[col] - names_df[most_model_name]) / names_df[most_model_name]
        names.append(f'{col}-{most_model_name}-误差比例')
    names_df = names_df[names]

    def add_rote_column(row):
        columns = []
        for r in names_df.columns:
            if row[r] <= config.rote:
                columns.append(r.split('-')[0])
        return pd.Series([columns], index=['columns'])
    names_df['columns'] = names_df.apply(add_rote_column, axis=1)

    def add_upper_lower_bound(row):
        # Upper and lower bound across the models within the threshold
        upper_bound = df_combined3.loc[row.name, row['columns']].max()
        lower_bound = df_combined3.loc[row.name, row['columns']].min()
        return pd.Series([lower_bound, upper_bound], index=['min_within_quantile', 'max_within_quantile'])
    df_combined3[['min_within_quantile', 'max_within_quantile']
                 ] = names_df.apply(add_upper_lower_bound, axis=1)
    def find_most_common_model():
        # Most frequent min/max models over the last 20 days
        min_model_max_frequency_model = df_combined3['min_model'].tail(
            20).value_counts().idxmax()
        max_model_max_frequency_model = df_combined3['max_model'].tail(
            20).value_counts().idxmax()
        if min_model_max_frequency_model == max_model_max_frequency_model:
            # Fall back to the second most frequent model of the last 20 days
            max_model_max_frequency_model = df_combined3['max_model'].tail(
                20).value_counts().nlargest(2).index[1]
        df_predict['min_model'] = min_model_max_frequency_model
        df_predict['max_model'] = max_model_max_frequency_model
        df_predict['min_within_quantile'] = df_predict[min_model_max_frequency_model]
        df_predict['max_within_quantile'] = df_predict[max_model_max_frequency_model]
    # find_most_common_model()

    df_predict2 = df_predict.copy()
    df_predict2['ds'] = pd.to_datetime(df_predict2['ds'])
    df_predict2['ds'] = df_predict2['ds'].dt.strftime('%Y-%m-%d')
    def _add_abs_error_rate():
        # Absolute error rate of every model against the ground truth
        for model in allmodelnames:
            df_combined3[f'{model}_abs_error_rate'] = abs(
                df_combined3['y'] - df_combined3[model]) / df_combined3['y']
        # Smallest error rate in each row and the model that produced it
        min_abs_error_rate_values = df_combined3.apply(
            lambda row: row[[f'{model}_abs_error_rate' for model in allmodelnames]].min(), axis=1)
        min_abs_error_rate_column_name = df_combined3.apply(
            lambda row: row[[f'{model}_abs_error_rate' for model in allmodelnames]].idxmin(), axis=1)
        # Strip the suffix to recover the model name
        min_abs_error_rate_column_name = min_abs_error_rate_column_name.map(
            lambda x: x.split('_')[0])
        min_abs_error_rate_predictions = df_combined3.apply(
            lambda row: row[min_abs_error_rate_column_name[row.name]], axis=1)
        # Attach both to the DataFrame
        df_combined3['min_abs_error_rate_prediction'] = min_abs_error_rate_predictions
        df_combined3['min_abs_error_rate_column_name'] = min_abs_error_rate_column_name
    # _add_abs_error_rate()

    # Cast numeric columns to float and round to 2 dp (skip 'ds')
    for col in df_combined3.columns:
        try:
            if col != 'ds':
                df_combined3[col] = df_combined3[col].astype(float)
                df_combined3[col] = df_combined3[col].round(2)
        except ValueError:
            pass
    df_combined3.to_csv(os.path.join(
        config.dataset, "testandpredict_groupby.csv"), index=False)

    # History plus forecast prices
    sqlitedb.drop_table('testandpredict_groupby')
    df_combined3.to_sql('testandpredict_groupby',
                        sqlitedb.connection, index=False)
    def _plt_predict_ture(df):
        # Plot at most the last 90 points of history plus forecast
        lens = df.shape[0] if df.shape[0] < 180 else 90
        df = df[-lens:]
        plt.figure(figsize=(20, 10))
        plt.plot(df['ds'], df['y'], label='真实值')
        # Shade the confidence band
        plt.fill_between(df['ds'], df['max_within_quantile'],
                         df['min_within_quantile'], alpha=0.2)
        plt.grid(True)
        # Annotate the observed values
        for i, j in zip(df['ds'], df['y']):
            plt.text(i, j, str(j), ha='center', va='bottom')
        for model in most_model:
            plt.plot(df['ds'], df[model], label=model, marker='o')
        # Vertical dashed line at the forecast start
        plt.axvline(x=df['ds'].iloc[-config.horizon], color='r', linestyle='--')
        plt.legend()
        plt.xlabel('日期')
        plt.ylabel('价格')
        plt.savefig(os.path.join(config.dataset, '历史价格-预测值.png'),
                    bbox_inches='tight')
        plt.close()
    def _plt_predict_table(df):
        # Render the forecast table as an image
        fig, ax = plt.subplots(figsize=(20, 6))
        ax.axis('off')  # hide the axes
        df = df.round(2)  # keep 2 decimal places
        df = df[-config.horizon:]
        df['Day'] = [f'Day_{i}' for i in range(1, config.horizon+1)]
        # Move the Day column to the front
        df = df[['Day'] + list(df.columns[:-1])]
        table = ax.table(cellText=df.values,
                         colLabels=df.columns, loc='center')
        table.auto_set_font_size(False)
        table.set_fontsize(10)
        # TODO: highlight each column's minimum in green
        plt.savefig(os.path.join(config.dataset, '预测值表格.png'),
                    bbox_inches='tight')
        plt.close()

    def _plt_model_results3():
        # Render the evaluation table as an image
        plt.rcParams['font.sans-serif'] = ['SimHei']
        fig, ax = plt.subplots(figsize=(20, 10))
        ax.axis('off')  # hide the axes
        table = ax.table(cellText=model_results3.values,
                         colLabels=model_results3.columns, loc='center')
        table.auto_set_font_size(False)
        table.set_fontsize(10)
        # TODO: highlight each column's minimum in green
        plt.savefig(os.path.join(config.dataset, '模型评估.png'),
                    bbox_inches='tight')
        plt.close()

    _plt_predict_ture(df_combined3)
    _plt_predict_table(df_combined3)
    _plt_model_results3()
    return model_results3


@exception_logger
def brent_export_pdf(num_indicators=475, num_models=21, num_dayindicator=202, inputsize=5, dataset='dataset', time='2024-07-30', reportname='report.pdf', sqlitedb='jbsh_yuanyou.db'):
    global y
    # Container for the report content
    content = list()
    # Last 60 observations of every feature
    feature_data_df = pd.read_csv(os.path.join(
        config.dataset, '指标数据添加时间特征.csv'), parse_dates=['ds']).tail(60)

def draw_feature_trend(feature_data_df, features):
        # Trend of each feature over the last 60 days, plotted against y
        feature_df = feature_data_df[['ds', 'y']+features]
        for i, col in enumerate(features):
            # try:
            print(f'正在绘制第{i+1}个特征{col}与价格散点图...')
            if col not in ['ds', 'y']:
                fig, ax1 = plt.subplots(figsize=(10, 6))
                # Target series on the first axis
                sns.lineplot(data=feature_df, x='ds', y='y', ax=ax1, color='b')
                ax1.set_xlabel('日期')
                ax1.set_ylabel('y', color='b')
                ax1.tick_params('y', colors='b')
                # Annotate values on ax1 with a small offset so the labels do
                # not overlap the curve
                for j in range(1, len(feature_df), 2):
                    value = feature_df['y'].iloc[j]
                    date = feature_df['ds'].iloc[j]
                    offset = 1.001
                    ax1.text(date, value * offset, str(round(value, 2)),
                             ha='center', va='bottom', color='b', fontsize=10)
                # Feature series on a twin axis
                ax2 = ax1.twinx()
                sns.lineplot(data=feature_df, x='ds', y=col, ax=ax2, color='r')
                ax2.set_ylabel(col, color='r')
                ax2.tick_params('y', colors='r')
                # Annotate values on ax2 with a small offset as well
                for j in range(0, len(feature_df), 2):
                    value = feature_df[col].iloc[j]
                    date = feature_df['ds'].iloc[j]
                    offset = 1.0003
                    ax2.text(date, value * offset, str(round(value, 2)),
                             ha='center', va='bottom', color='r', fontsize=10)
                plt.title(col)
                # Date axis with automatic locator and formatter
                locator = mdates.AutoDateLocator()
                formatter = mdates.AutoDateFormatter(locator)
                ax1.xaxis.set_major_locator(locator)
                ax1.xaxis.set_major_formatter(formatter)
                # Sanitize characters that are invalid in file names
                col = col.replace('*', '-')
                col = col.replace(':', '-')
                col = col.replace(r'/', '-')
                plt.savefig(os.path.join(config.dataset, f'{col}与价格散点图.png'))
                content.append(Graphs.draw_img(
                    os.path.join(config.dataset, f'{col}与价格散点图.png')))
                plt.close()
            # except Exception as e:
            #     print(f'绘制第{i+1}个特征{col}与价格散点图时出错:{e}')

    # Report title
    content.append(Graphs.draw_title(f'{config.y}{time}预测报告'))

    # Section 1: forecast results
    content.append(Graphs.draw_little_title('一、预测结果:'))
    # History and forecast price chart
    content.append(Graphs.draw_img(
        os.path.join(config.dataset, '历史价格-预测值.png')))
    # Band explanation
    content.append(Graphs.draw_text('图示说明:'))
    content.append(Graphs.draw_text(
        ' 确定置信区间:设置残差置信阈值,以每周最佳模型为基准,选取在置信区间的预测值作为置信区间;'))
    # History and top-ten band chart
    content.append(Graphs.draw_img(
        os.path.join(config.dataset, '历史价格-预测值1.png')))
    content.append(Graphs.draw_text('图示说明:'))
    content.append(Graphs.draw_text(
        ' 确定置信区间使用模型评估指标MAE得到前十个模型取平均值上下1.5作为价格波动置信区间;'))
    # Rows where y is missing correspond to the forecast horizon
    df = pd.read_csv(os.path.join(config.dataset, 'predict.csv'),
                     encoding='gbk')
    df_true = pd.read_csv(os.path.join(
        config.dataset, '指标数据添加时间特征.csv'), encoding='utf-8')  # ground truth for the forecast dates
    df_true = df_true[['ds', 'y']]
    eval_df = pd.read_csv(os.path.join(
        config.dataset, 'model_evaluation.csv'), encoding='utf-8')
    # eval_df is already sorted by the evaluation metric
    fivemodels_list = eval_df['模型(Model)'].values  # used as a column index below
    # Keep ds plus the five best models
    df = df[['ds'] + fivemodels_list.tolist()]
    # Join the ground truth for the forecast dates
    df = pd.merge(df, df_true, on='ds', how='left')
    # Drop all-NaN columns
    df = df.dropna(how='all', axis=1)
    # Cast numeric columns (all but 'ds') to float and round to 2 dp
    num_cols = [col for col in df.columns if col !=
                'ds' and pd.api.types.is_numeric_dtype(df[col])]
    for col in num_cols:
        df[col] = df[col].astype(float).round(2)
    # Add mean, max and min columns
    df['平均值'] = df[num_cols].mean(axis=1).round(2)
    df['最大值'] = df[num_cols].max(axis=1)
    df['最小值'] = df[num_cols].min(axis=1)
    # Transpose so each model becomes a row, then reset the index
    df = df.T
    df = df.reset_index()
    # Forecast table
    data = df.values.tolist()
    col_width = 500/len(df.columns)
    content.append(Graphs.draw_table(col_width, *data))
    # Section 2: deviation analysis for the previous forecast period
    content.append(Graphs.draw_little_title('二、上一预测周期偏差率分析:'))
    df = pd.read_csv(os.path.join(
        config.dataset, 'testandpredict_groupby.csv'), encoding='utf-8')
    df4 = df.copy()  # used for the deviation-rate computation
    # Drop the created_dt column
    df4 = df4.drop(columns=['created_dt'])
    # Deviation rate of every model against y, in percent
    df3 = pd.DataFrame()
    # Drop rows containing nulls
    df4 = df4.dropna()
    df3['ds'] = df4['ds']
    for col in fivemodels_list:
        df3[col] = round(abs(df4[col] - df4['y']) / df4['y'] * 100, 2)
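    # Sketch: deviation rate = |prediction - y| / y * 100, rounded to 2 dp;
    # e.g. y=800 and prediction=820 give 2.5 (%).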
    # Keep the five best models over the last `inputsize` rows
    df3 = df3[['ds']+fivemodels_list.tolist()][-inputsize:]
    # Start and end of the previous forecast period
    stime = df3['ds'].iloc[0]
    etime = df3['ds'].iloc[-1]
    # Deviation-rate table
    fivemodels = ', '.join(eval_df['模型(Model)'].values[:5])  # written into the text below
    content.append(Graphs.draw_text(
        f'预测使用了{num_models}个模型进行训练使用评估结果MAE前五的模型分别是 {fivemodels} ,模型上一预测区间 {stime} -- {etime}的偏差率(%)分别是:'))
    df3 = df3.T
    df3 = df3.reset_index()
    data = df3.values.tolist()
    col_width = 500/len(df3.columns)
    content.append(Graphs.draw_table(col_width, *data))
    # Accuracy of the previous week
    content.append(Graphs.draw_little_title('上一周预测准确率:'))
    df4 = sqlitedb.select_data('accuracy_rote', order_by='结束日期 desc', limit=1)
    df4 = df4.T
    df4 = df4.reset_index()
    df4 = df4.T
    data = df4.values.tolist()
    col_width = 500/len(df4.columns)
    content.append(Graphs.draw_table(col_width, *data))
    # Section 3: how the forecast was produced
    content.append(Graphs.draw_little_title('三、预测过程解析:'))
    # Features, models and parameter configuration
    content.append(Graphs.draw_little_title('模型选择:'))
    content.append(Graphs.draw_text(
        f'本次预测使用了一个专门收集时间序列的NeuralForecast库中的{num_models}个模型:'))
    content.append(Graphs.draw_text(f'使用40天的数据预测未来{inputsize}天的数据。'))
    content.append(Graphs.draw_little_title('指标情况:'))
    with open(os.path.join(config.dataset, '特征频度统计.txt'), encoding='utf-8') as f:
        for line in f.readlines():
            content.append(Graphs.draw_text(line))
    data = pd.read_csv(os.path.join(config.dataset, '指标数据添加时间特征.csv'),
                       encoding='utf-8')  # used for the correlation analysis
    df_zhibiaofenlei = loadcsv(os.path.join(
        config.dataset, '特征处理后的指标名称及分类.csv'))  # used for the bubble chart
    df_zhibiaoshuju = data.copy()  # used for the bubble chart
    # Correlation bubble chart per indicator category
    grouped = df_zhibiaofenlei.groupby('指标分类')
    grouped_corr = pd.DataFrame(columns=['指标分类', '指标数量', '相关性总和'])
    content.append(Graphs.draw_little_title('按指标分类分别与预测目标进行皮尔逊相关系数分析:'))
    content.append(Graphs.draw_text('''皮尔逊相关系数说明:'''))
    content.append(Graphs.draw_text('''衡量两个特征之间的线性相关性。'''))
    content.append(Graphs.draw_text('''
    相关系数为1表示两个变量之间存在完全正向的线性关系即当一个变量增加时另一个变量也相应增加且变化是完全一致的'''))
    content.append(Graphs.draw_text(
        '''相关系数为-1表示两个变量之间存在完全负向的线性关系即当一个变量增加时另一个变量会相应减少且变化是完全相反的'''))
    content.append(Graphs.draw_text(
        '''相关系数接近0表示两个变量之间不存在线性关系即它们的变化不会随着对方的变化而变化。'''))
    for name, group in grouped:
        cols = group['指标名称'].tolist()
        config.logger.info(f'开始绘制{name}类指标的相关性直方图')
        cols_subset = cols
        feature_names = ['y'] + cols_subset
        correlation_matrix = df_zhibiaoshuju[feature_names].corr()['y']
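        # corr()['y'] is the Pearson correlation of every column in
        # feature_names with the target y; the 'y' entry itself is always 1 and
        # is skipped when the positively correlated features are listed below.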
        # Histogram of the correlations within the category
        plt.figure(figsize=(10, 8))
        sns.histplot(correlation_matrix.values.flatten(),
                     bins=20, kde=True, color='skyblue')
        plt.title(f'{name}类指标(共{len(cols_subset)}个)相关性直方分布图')
        plt.xlabel('相关系数')
        plt.ylabel('频数')
        plt.savefig(os.path.join(
            config.dataset, f'{name}类指标相关性直方分布图.png'), bbox_inches='tight')
        plt.close()
        content.append(Graphs.draw_img(
            os.path.join(config.dataset, f'{name}类指标相关性直方分布图.png')))
        content.append(Graphs.draw_text(
            f'{name}类指标(共{len(cols_subset)}个)的相关性直方分布图如上所示。'))
        # Features positively correlated with y
        positive_corr_features = correlation_matrix[correlation_matrix > 0].sort_values(
            ascending=False).index.tolist()[1:]
        print(f'{name}下正相关的特征值有:', positive_corr_features)
        if len(positive_corr_features) > 5:
            positive_corr_features = positive_corr_features[0:5]
            content.append(Graphs.draw_text(
                f'{name}类指标中与预测目标y正相关前五的特征有{positive_corr_features}'))
            draw_feature_trend(feature_data_df, positive_corr_features)
        elif len(positive_corr_features) == 0:
            pass
        else:
            content.append(Graphs.draw_text(
                f'其中与预测目标y正相关的特征有{positive_corr_features}'))
            draw_feature_trend(feature_data_df, positive_corr_features)

        # Features negatively correlated with y
        negative_corr_features = correlation_matrix[correlation_matrix < 0].sort_values(
            ascending=True).index.tolist()
        print(f'{name}下负相关的特征值有:', negative_corr_features)
        if len(negative_corr_features) > 5:
            negative_corr_features = negative_corr_features[:5]
            content.append(Graphs.draw_text(
                f'与预测目标y负相关前五的特征有{negative_corr_features}'))
            draw_feature_trend(feature_data_df, negative_corr_features)
        elif len(negative_corr_features) == 0:
            pass
        else:
            content.append(Graphs.draw_text(
                f'{name}类指标中与预测目标y负相关的特征有{negative_corr_features}'))
            draw_feature_trend(feature_data_df, negative_corr_features)
        # Sum of absolute correlations for the category
        correlation_sum = correlation_matrix.abs().sum()
        config.logger.info(f'{name}类指标的相关性总和为:{correlation_sum}')
        # Append the category's summary row for the bubble chart
        goup_corr = pd.DataFrame(
            {'指标分类': [name], '指标数量': [len(cols_subset)], '相关性总和': [correlation_sum]})
        grouped_corr = pd.concat(
            [grouped_corr, goup_corr], axis=0, ignore_index=True)
    # Bubble chart of summed correlations per category
    config.logger.info(f'开始绘制相关性总和的气泡图')
    plt.figure(figsize=(10, 10))
    sns.scatterplot(data=grouped_corr, x='相关性总和', y='指标数量', size='相关性总和', sizes=(
        grouped_corr['相关性总和'].min()*5, grouped_corr['相关性总和'].max()*5), hue='指标分类', palette='viridis')
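    # The sizes=(min*5, max*5) mapping makes the bubble area scale with the
    # category's summed |r|, so categories whose features are collectively more
    # correlated with y draw larger bubbles.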
    plt.title('指标分类相关性总和的气泡图')
    plt.ylabel('数量')
    plt.savefig(os.path.join(config.dataset, '指标分类相关性总和的气泡图.png'),
                bbox_inches='tight')
    plt.close()
    content.append(Graphs.draw_img(
        os.path.join(config.dataset, '指标分类相关性总和的气泡图.png')))
    content.append(Graphs.draw_text(
        '气泡图中,横轴为指标分类,纵轴为指标分类下的特征数量,气泡的面积越大表示该分类中特征的相关系数和越大。'))
    config.logger.info(f'绘制相关性总和的气泡图结束')
    # Model selection
    content.append(Graphs.draw_little_title('模型选择:'))
    content.append(Graphs.draw_text(
        f'预测使用了{num_models}个模型进行训练拟合通过评估指标MAE从小到大排列前5个模型的简介如下'))
    # Short introduction for each of the five best models
    with open(os.path.join(config.dataset, 'model_introduction.txt'), 'r', encoding='utf-8') as f:
        for line in f:
            line_split = line.strip().split('--')
            if line_split[0] in fivemodels_list:
                for introduction in line_split:
                    content.append(Graphs.draw_text(introduction))
    # Model evaluation
    content.append(Graphs.draw_little_title('模型评估:'))
    # NOTE: the loop below operates on eval_df loaded earlier; this re-read is
    # kept as-is
    df = pd.read_csv(os.path.join(
        config.dataset, 'model_evaluation.csv'), encoding='utf-8')
    # Cast the metric columns to float and round to 3 dp
    for col in eval_df.columns:
        if col not in ['模型(Model)']:
            eval_df[col] = eval_df[col].astype(float)
            eval_df[col] = eval_df[col].round(3)
    # Keep only the five best models
    eval_df = eval_df[eval_df['模型(Model)'].isin(fivemodels_list)]
    # Transpose for the table layout
    eval_df = eval_df.T
    eval_df = eval_df.reset_index()
    eval_df = eval_df.T
    # Evaluation table
    data = eval_df.values.tolist()
    col_width = 500/len(eval_df.columns)
    content.append(Graphs.draw_table(col_width, *data))
    content.append(Graphs.draw_text('评估指标释义:'))
    content.append(Graphs.draw_text(
        '1. 均方根误差(RMSE):均方根误差是衡量预测值与实际值之间误差的一种方法,取值越小,误差越小,预测效果越好。'))
    content.append(Graphs.draw_text(
        '2. 平均绝对误差(MAE):平均绝对误差是衡量预测值与实际值之间误差的一种方法,取值越小,误差越小,预测效果越好。'))
    content.append(Graphs.draw_text(
        '3. 平均平方误差(MSE):平均平方误差是衡量预测值与实际值之间误差的一种方法,取值越小,误差越小,预测效果越好。'))
    content.append(Graphs.draw_text('模型拟合:'))
    # Fitted vs. actual chart
    content.append(Graphs.draw_img(
        os.path.join(config.dataset, '预测值与真实值对比图.png')))
    # Build the PDF
    doc = SimpleDocTemplate(os.path.join(
        config.dataset, reportname), pagesize=letter)
    doc.build(content)
    # Upload the PDF to the digital information platform
    try:
        if config.is_update_report:
            with open(os.path.join(config.dataset, reportname), 'rb') as f:
                base64_data = base64.b64encode(f.read()).decode('utf-8')
            upload_data["data"]["fileBase64"] = base64_data
            upload_data["data"]["fileName"] = reportname
            token = get_head_auth_report()
            upload_report_data(token, upload_data)
    except TimeoutError as e:
        print(f"请求超时: {e}")


@exception_logger
def pp_export_pdf(num_indicators=475, num_models=21, num_dayindicator=202, inputsize=5, dataset='dataset', time='2024-07-30', reportname='report.pdf', sqlitedb='jbsh_yuanyou.db'):
    # Container for the report content
    content = list()
    # Last 20 observations of every feature
    feature_data_df = pd.read_csv(os.path.join(
        config.dataset, '指标数据添加时间特征.csv'), parse_dates=['ds']).tail(20)

    def draw_feature_trend(feature_data_df, features):
        # Trend of each feature over the recent window, plotted against y
        feature_df = feature_data_df[['ds', 'y']+features]
        for i, col in enumerate(features):
            # try:
            print(f'正在绘制第{i+1}个特征{col}与价格散点图...')
            if col not in ['ds', 'y']:
                fig, ax1 = plt.subplots(figsize=(10, 6))
                # Target series on the first axis
                sns.lineplot(data=feature_df, x='ds', y='y', ax=ax1, color='b')
                ax1.set_xlabel('日期')
                ax1.set_ylabel('y', color='b')
                ax1.tick_params('y', colors='b')
                # Annotate values on ax1 with a small offset so the labels do
                # not overlap the curve
                for j in range(1, len(feature_df), 2):
                    value = feature_df['y'].iloc[j]
                    date = feature_df['ds'].iloc[j]
                    offset = 1.001
                    ax1.text(date, value * offset, str(round(value, 2)),
                             ha='center', va='bottom', color='b', fontsize=10)
                # Feature series on a twin axis
                ax2 = ax1.twinx()
                sns.lineplot(data=feature_df, x='ds', y=col, ax=ax2, color='r')
                ax2.set_ylabel(col, color='r')
                ax2.tick_params('y', colors='r')
                # Annotate values on ax2 with a small offset as well
                for j in range(0, len(feature_df), 2):
                    value = feature_df[col].iloc[j]
                    date = feature_df['ds'].iloc[j]
                    offset = 1.0003
                    ax2.text(date, value * offset, str(round(value, 2)),
                             ha='center', va='bottom', color='r', fontsize=10)
                plt.title(col)
                # Date axis with automatic locator and formatter
                locator = mdates.AutoDateLocator()
                formatter = mdates.AutoDateFormatter(locator)
                ax1.xaxis.set_major_locator(locator)
                ax1.xaxis.set_major_formatter(formatter)
                # Sanitize characters that are invalid in file names
                col = col.replace('*', '-')
                col = col.replace(':', '-')
                col = col.replace(r'/', '-')
                plt.savefig(os.path.join(config.dataset, f'{col}与价格散点图.png'))
                content.append(Graphs.draw_img(
                    os.path.join(config.dataset, f'{col}与价格散点图.png')))
                plt.close()
            # except Exception as e:
            #     print(f'绘制第{i+1}个特征{col}与价格散点图时出错:{e}')

    # Report title
    content.append(Graphs.draw_title(f'{config.y}{time}预测报告'))
    # Section 1: forecast results
    content.append(Graphs.draw_little_title('一、预测结果:'))
    # History and forecast price chart
    content.append(Graphs.draw_img(
        os.path.join(config.dataset, '历史价格-预测值.png')))
    # Rows where y is missing correspond to the forecast horizon
    df = pd.read_csv(os.path.join(config.dataset, 'predict.csv'),
                     encoding='gbk')
    df_true = pd.read_csv(os.path.join(
        config.dataset, '指标数据添加时间特征.csv'), encoding='utf-8')  # ground truth for the forecast dates
    df_true = df_true[['ds', 'y']]
    eval_df = pd.read_csv(os.path.join(
        config.dataset, 'model_evaluation.csv'), encoding='utf-8')
    # eval_df is already sorted by the evaluation metric
    fivemodels_list = eval_df['模型(Model)'].values  # used as a column index below
    # Keep ds plus the five best models
    df = df[['ds'] + fivemodels_list.tolist()]
    # Join the ground truth for the forecast dates
    df = pd.merge(df, df_true, on='ds', how='left')
    # Drop all-NaN columns
    df = df.dropna(how='all', axis=1)
    # Cast numeric columns (all but 'ds') to float and round to 2 dp
    num_cols = [col for col in df.columns if col !=
                'ds' and pd.api.types.is_numeric_dtype(df[col])]
    for col in num_cols:
        df[col] = df[col].astype(float).round(2)
    # Add mean, max and min columns
    df['平均值'] = df[num_cols].mean(axis=1).round(2)
    df['最大值'] = df[num_cols].max(axis=1)
    df['最小值'] = df[num_cols].min(axis=1)
    # Transpose so each model becomes a row, then reset the index
    df = df.T
    df = df.reset_index()
    # Forecast table
    data = df.values.tolist()
    col_width = 500/len(df.columns)
    content.append(Graphs.draw_table(col_width, *data))
    # Section 2: deviation analysis for the previous forecast period
    content.append(Graphs.draw_little_title('二、上一预测周期偏差率分析:'))
    df = pd.read_csv(os.path.join(
        config.dataset, 'testandpredict_groupby.csv'), encoding='utf-8')
    df4 = df.copy()  # used for the deviation-rate computation
    # Deviation rate of every model against y, in percent
    df3 = pd.DataFrame()
    # Drop rows where y is missing
    df4 = df4.dropna(subset=['y'])
    # Previously: drop any row containing nulls
    # df4 = df4.dropna()
    df3['ds'] = df4['ds']
    for col in fivemodels_list:
        df3[col] = round(abs(df4[col] - df4['y']) / df4['y'] * 100, 2)
    # Keep the five best models over the last `inputsize` rows
    df3 = df3[['ds']+fivemodels_list.tolist()][-inputsize:]
    # Start and end of the previous forecast period
    stime = df3['ds'].iloc[0]
    etime = df3['ds'].iloc[-1]
    # Deviation-rate table
    fivemodels = ', '.join(eval_df['模型(Model)'].values[:5])  # written into the text below
    content.append(Graphs.draw_text(
        f'预测使用了{num_models}个模型进行训练使用评估结果MAE前五的模型分别是 {fivemodels} ,模型上一预测区间 {stime} -- {etime}的偏差率(%)分别是:'))
    df3 = df3.T
    df3 = df3.reset_index()
    data = df3.values.tolist()
    col_width = 500/len(df3.columns)
    content.append(Graphs.draw_table(col_width, *data))
2024-11-12 18:26:20 +08:00
content.append(Graphs.draw_little_title('三、预测过程解析:'))
2025-03-05 09:47:02 +08:00
# 特征、模型、参数配置
2024-11-12 18:26:20 +08:00
content.append(Graphs.draw_little_title('模型选择:'))
2025-03-05 09:47:02 +08:00
content.append(Graphs.draw_text(
f'本次预测使用了一个专门收集时间序列的NeuralForecast库中的{num_models}个模型:'))
2024-11-12 18:26:20 +08:00
content.append(Graphs.draw_text(f'使用40天的数据预测未来{inputsize}天的数据。'))
content.append(Graphs.draw_little_title('指标情况:'))
2025-03-11 11:25:43 +08:00
with open(os.path.join(config.dataset, '特征频度统计.txt'), encoding='utf-8') as f:
2024-11-12 18:26:20 +08:00
for line in f.readlines():
content.append(Graphs.draw_text(line))
2025-03-11 11:25:43 +08:00
data = pd.read_csv(os.path.join(config.dataset, '指标数据添加时间特征.csv'),
2025-03-05 09:47:02 +08:00
encoding='utf-8') # 计算相关系数用
df_zhibiaofenlei = loadcsv(os.path.join(
2025-03-11 11:25:43 +08:00
config.dataset,'特征处理后的指标名称及分类.csv')) # 气泡图用
2024-11-12 18:26:20 +08:00
df_zhibiaoshuju = data.copy() # 气泡图用
# 绘制特征相关气泡图
2025-03-05 09:47:02 +08:00
2024-11-12 18:26:20 +08:00
grouped = df_zhibiaofenlei.groupby('指标分类')
grouped_corr = pd.DataFrame(columns=['指标分类', '指标数量', '相关性总和'])
2025-03-05 09:47:02 +08:00
2024-11-12 18:26:20 +08:00
content.append(Graphs.draw_little_title('按指标分类分别与预测目标进行皮尔逊相关系数分析:'))
2025-03-05 09:47:02 +08:00
content.append(Graphs.draw_text('''皮尔逊相关系数说明:'''))
content.append(Graphs.draw_text('''衡量两个特征之间的线性相关性。'''))
2024-11-12 18:26:20 +08:00
content.append(Graphs.draw_text('''
相关系数为1表示两个变量之间存在完全正向的线性关系即当一个变量增加时另一个变量也相应增加且变化是完全一致的'''))
content.append(Graphs.draw_text(
'''相关系数为-1表示两个变量之间存在完全负向的线性关系即当一个变量增加时另一个变量会相应减少且变化是完全相反的'''))
content.append(Graphs.draw_text(
'''相关系数接近0表示两个变量之间不存在线性关系即它们的变化不会随着对方的变化而变化。'''))
for name, group in grouped:
cols = group['指标名称'].tolist()
config.logger.info(f'开始绘制{name}类指标的相关性直方图')
cols_subset = cols
feature_names = ['y'] + cols_subset
correlation_matrix = df_zhibiaoshuju[feature_names].corr()['y']
# 绘制特征相关性直方分布图
plt.figure(figsize=(10, 8))
sns.histplot(correlation_matrix.values.flatten(),
bins=20, kde=True, color='skyblue')
plt.title(f'{name}类指标(共{len(cols_subset)}个)相关性直方分布图')
plt.xlabel('相关系数')
plt.ylabel('频数')
plt.savefig(os.path.join(
config.dataset,f'{name}类指标相关性直方分布图.png'), bbox_inches='tight')
plt.close()
content.append(Graphs.draw_img(
os.path.join(config.dataset, f'{name}类指标相关性直方分布图.png')))
content.append(Graphs.draw_text(
f'{name}类指标(共{len(cols_subset)}个)的相关性直方分布图如上所示。'))
# 相关性大于0的特征
positive_corr_features = correlation_matrix[correlation_matrix > 0].sort_values(
ascending=False).index.tolist()[1:]
print(f'{name}下正相关的特征值有:', positive_corr_features)
if len(positive_corr_features) > 5:
positive_corr_features = positive_corr_features[0:5]
content.append(Graphs.draw_text(
f'{name}类指标中与预测目标y正相关前五的特征有{positive_corr_features}'))
draw_feature_trend(feature_data_df, positive_corr_features)
elif len(positive_corr_features) == 0:
pass
else:
content.append(Graphs.draw_text(
f'其中与预测目标y正相关的特征有{positive_corr_features}'))
draw_feature_trend(feature_data_df, positive_corr_features)
# 相关性小于0的特征
negative_corr_features = correlation_matrix[correlation_matrix < 0].sort_values(
ascending=True).index.tolist()
print(f'{name}下负相关的特征值有:', negative_corr_features)
if len(negative_corr_features) > 5:
negative_corr_features = negative_corr_features[:5]
content.append(Graphs.draw_text(
f'与预测目标y负相关前五的特征有{negative_corr_features}'))
draw_feature_trend(feature_data_df, negative_corr_features)
elif len(negative_corr_features) == 0:
pass
else:
content.append(Graphs.draw_text(
f'{name}类指标中与预测目标y负相关的特征有{negative_corr_features}'))
draw_feature_trend(feature_data_df, negative_corr_features)
# 计算correlation_sum 第一行的相关性的绝对值的总和
correlation_sum = correlation_matrix.abs().sum()
config.logger.info(f'{name}类指标的相关性总和为:{correlation_sum}')
# 分组的相关性总和拼接到grouped_corr
        group_corr = pd.DataFrame(
            {'指标分类': [name], '指标数量': [len(cols_subset)], '相关性总和': [correlation_sum]})
        grouped_corr = pd.concat(
            [grouped_corr, group_corr], axis=0, ignore_index=True)
# 绘制相关性总和的气泡图
config.logger.info(f'开始绘制相关性总和的气泡图')
plt.figure(figsize=(10, 10))
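    # 说明sns.scatterplot 的 sizes=(min, max) 限定气泡面积的上下界,
    # 这里按“相关性总和”的 5 倍缩放,使相关性总和越大的分类气泡越大(经验系数,可按展示效果调整)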
sns.scatterplot(data=grouped_corr, x='相关性总和', y='指标数量', size='相关性总和', sizes=(
grouped_corr['相关性总和'].min()*5, grouped_corr['相关性总和'].max()*5), hue='指标分类', palette='viridis')
plt.title('指标分类相关性总和的气泡图')
plt.ylabel('数量')
plt.savefig(os.path.join(config.dataset, '指标分类相关性总和的气泡图.png'),
bbox_inches='tight')
plt.close()
content.append(Graphs.draw_img(os.path.join(config.dataset, '指标分类相关性总和的气泡图.png')))
content.append(Graphs.draw_text(
'气泡图中,横轴为指标分类,纵轴为指标分类下的特征数量,气泡的面积越大表示该分类中特征的相关系数和越大。'))
config.logger.info(f'绘制相关性总和的气泡图结束')
content.append(Graphs.draw_little_title('模型选择:'))
content.append(Graphs.draw_text(
f'预测使用了{num_models}个模型进行训练拟合通过评估指标MAE从小到大排列前5个模型的简介如下'))
# 读取模型简介
with open(os.path.join(config.dataset, 'model_introduction.txt'), 'r', encoding='utf-8') as f:
for line in f:
line_split = line.strip().split('--')
if line_split[0] in fivemodels_list:
for introduction in line_split:
content.append(Graphs.draw_text(introduction))
content.append(Graphs.draw_little_title('模型评估:'))
    eval_df = pd.read_csv(os.path.join(
        config.dataset, 'model_evaluation.csv'), encoding='utf-8')
    # 将 eval_df 的数值列转为 float 并保留三位小数
    for col in eval_df.columns:
        if col not in ['模型(Model)']:
            eval_df[col] = eval_df[col].astype(float)
eval_df[col] = eval_df[col].round(3)
# 筛选 fivemodels_list.tolist() 的行
eval_df = eval_df[eval_df['模型(Model)'].isin(fivemodels_list)]
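    # 两次转置并在中间 reset_index使评估指标名成为表格的第一行表头draw_table 按行渲染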
eval_df = eval_df.T
eval_df = eval_df.reset_index()
eval_df = eval_df.T
data = eval_df.values.tolist()
col_width = 500/len(eval_df.columns)
content.append(Graphs.draw_table(col_width, *data))
content.append(Graphs.draw_text('评估指标释义:'))
content.append(Graphs.draw_text(
'1. 均方根误差(RMSE):均方根误差是衡量预测值与实际值之间误差的一种方法,取值越小,误差越小,预测效果越好。'))
content.append(Graphs.draw_text(
'2. 平均绝对误差(MAE):平均绝对误差是衡量预测值与实际值之间误差的一种方法,取值越小,误差越小,预测效果越好。'))
content.append(Graphs.draw_text(
'3. 平均平方误差(MSE):平均平方误差是衡量预测值与实际值之间误差的一种方法,取值越小,误差越小,预测效果越好。'))
content.append(Graphs.draw_text('模型拟合:'))
content.append(Graphs.draw_img(os.path.join(config.dataset, '预测值与真实值对比图.png')))
# 附1特征列表
content.append(Graphs.draw_little_title('附1、特征列表'))
df_fuyi = pd.read_csv(os.path.join(
config.dataset,'特征频度统计.csv'), encoding='utf-8')
for col in df_fuyi.columns:
fuyi = df_fuyi[col]
fuyi = fuyi.dropna()
content.append(Graphs.draw_text(f'{col}'))
for i in range(len(fuyi)):
content.append(Graphs.draw_text(f'{i+1}{fuyi[i]}'))
# 生成pdf文件
doc = SimpleDocTemplate(os.path.join(config.dataset, reportname), pagesize=letter)
doc.build(content)
# pdf 上传到数字化信息平台
try:
if config.is_update_report:
with open(os.path.join(config.dataset, reportname), 'rb') as f:
base64_data = base64.b64encode(f.read()).decode('utf-8')
global_config['upload_data']["data"]["fileBase64"] = base64_data
global_config['upload_data']["data"]["fileName"] = reportname
token = get_head_auth_report()
upload_report_data(token, config.upload_data)
except TimeoutError as e:
print(f"请求超时: {e}")
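

# 附:偏差率计算的最小示例(仅作说明,非本模块正式接口;函数名
# _demo_deviation_rate 为假设)。上文各报告函数中“偏差率”口径一致:
# 对每个模型列取 |预测值 - y| / y * 100再保留两位小数。
def _demo_deviation_rate(df_pred, model_cols):
    """返回包含 ds 与各模型偏差率(%)的 DataFrame。"""
    result = pd.DataFrame()
    df_pred = df_pred.dropna(subset=['y'])  # 先剔除 y 缺失的行,与正文处理一致
    result['ds'] = df_pred['ds']
    for col in model_cols:
        result[col] = round(abs(df_pred[col] - df_pred['y']) / df_pred['y'] * 100, 2)
    return result
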
def pp_export_pdf_v1(num_indicators=475, num_models=21, num_dayindicator=202, inputsize=5, dataset='dataset', time='2024-07-30', reportname='report.pdf'):
global y
# 创建内容对应的空列表
content = list()
# 添加标题
content.append(Graphs.draw_title(f'{y}{time}预测报告'))
# 预测结果
content.append(Graphs.draw_little_title('一、预测结果:'))
# 添加图片
content.append(Graphs.draw_img(os.path.join(config.dataset, '历史价格-预测值.png')))
2024-11-01 16:38:21 +08:00
    # 读取预测结果、真实值与模型评估数据pandas 已在模块顶部导入,无需重复导入)
    df = pd.read_csv(os.path.join(config.dataset, 'predict.csv'), encoding='gbk')
    df_true = pd.read_csv(os.path.join(
        config.dataset, '指标数据添加时间特征.csv'), encoding='utf-8')  # 获取预测日期对应的真实值
    df_true = df_true[['ds', 'y']]
    eval_df = pd.read_csv(os.path.join(
        config.dataset, 'model_evaluation.csv'), encoding='utf-8')
# 按评估指标排序,取前五
    fivemodels_list = eval_df['模型(Model)'].values[:5]  # 列表形式,后面当作列名索引使用
# 取 fivemodels_list 和 ds 列
df = df[['ds'] + fivemodels_list.tolist()]
# 拼接预测日期对应的真实值
df = pd.merge(df, df_true, on='ds', how='left')
# 删除全部为nan的列
df = df.dropna(how='all', axis=1)
# 选择除 'ds' 列外的数值列,并进行类型转换和四舍五入
num_cols = [col for col in df.columns if col !=
'ds' and pd.api.types.is_numeric_dtype(df[col])]
for col in num_cols:
df[col] = df[col].astype(float).round(2)
# 添加最大值、最小值、平均值三列
df['平均值'] = df[num_cols].mean(axis=1).round(2)
df['最大值'] = df[num_cols].max(axis=1)
df['最小值'] = df[num_cols].min(axis=1)
# df转置
df = df.T
# df重置索引
df = df.reset_index()
# 添加预测值表格
data = df.values.tolist()
col_width = 500/len(df.columns)
content.append(Graphs.draw_table(col_width, *data))
content.append(Graphs.draw_little_title('二、上一预测周期偏差率分析:'))
df = pd.read_csv(os.path.join(
config.dataset,'testandpredict_groupby.csv'), encoding='utf-8')
df4 = df.copy() # 计算偏差率使用
# 计算模型偏差率
# 计算各列对于y列的差值百分比
df3 = pd.DataFrame() # 存储偏差率
# 删除有null的行
df4 = df4.dropna()
df3['ds'] = df4['ds']
for col in df.columns:
if col not in ['y', 'ds', 'index']:
df3[col] = round(abs(df4[col] - df4['y']) / df4['y'] * 100, 2)
    # 取评估MAE前五模型的偏差率保留最近 inputsize 天
df3 = df3[['ds']+fivemodels_list.tolist()][-inputsize:]
# 找出上一预测区间的时间
stime = df3['ds'].iloc[0]
etime = df3['ds'].iloc[-1]
# 添加偏差率表格
    fivemodels = '、'.join(eval_df['模型(Model)'].values[:5])  # 字符串形式,后面写入字符串使用
content.append(Graphs.draw_text(
f'预测使用了{num_models}个模型进行训练使用评估结果MAE前五的模型分别是 {fivemodels} ,模型上一预测区间 {stime} -- {etime}的偏差率(%)分别是:'))
# # 添加偏差率表格
df3 = df3.T
df3 = df3.reset_index()
data = df3.values.tolist()
col_width = 500/len(df3.columns)
content.append(Graphs.draw_table(col_width, *data))
content.append(Graphs.draw_little_title('三、预测过程解析:'))
# 特征、模型、参数配置
content.append(Graphs.draw_little_title('模型选择:'))
content.append(Graphs.draw_text(
        f'本次预测使用了专门面向时间序列预测的NeuralForecast库中的{num_models}个模型:'))
content.append(Graphs.draw_text(f'使用40天的数据预测未来{inputsize}天的数据。'))
content.append(Graphs.draw_little_title('指标情况:'))
with open(os.path.join(config.dataset, '特征频度统计.txt'), encoding='utf-8') as f:
for line in f.readlines():
content.append(Graphs.draw_text(line))
# 特征工程
# 计算特征相关性
# 读取数据
from scipy.stats import spearmanr
data = pd.read_csv(os.path.join(
config.dataset,'指标数据添加时间特征.csv'), encoding='utf-8')
# 重命名预测列
data.rename(columns={y: 'y'}, inplace=True) # 修改
data['ds'] = pd.to_datetime(data['ds']) # 修改
# 去掉ds列
data.drop(columns=['ds'], inplace=True)
# 创建一个空的 DataFrame 来保存相关系数
correlation_df = pd.DataFrame(columns=['Feature', 'Correlation'])
# 计算各特征与目标列的皮尔逊相关系数,并保存到新的 DataFrame 中
for col in data.columns:
if col != 'y':
pearson_correlation = np.corrcoef(data[col], data['y'])[0, 1]
spearman_correlation, _ = spearmanr(data[col], data['y'])
new_row = {'Feature': col, 'Pearson_Correlation': round(
pearson_correlation, 3), 'Spearman_Correlation': round(spearman_correlation, 2)}
            correlation_df = pd.concat(
                [correlation_df, pd.DataFrame([new_row])], ignore_index=True)  # 用公开 API 替代私有方法 _append
# 删除空列
correlation_df.drop('Correlation', axis=1, inplace=True)
correlation_df.dropna(inplace=True)
correlation_df.to_csv(os.path.join(config.dataset, '指标相关性分析.csv'), index=False)
data = correlation_df['Pearson_Correlation'].values.tolist()
# 生成 -1 到 1 的 20 个区间
bins = np.linspace(-1, 1, 21)
# 计算每个区间的统计数(这里是区间内数据的数量)
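    # 说明:下面的逐区间计数与 np.histogram(data, bins=bins)[0] 基本一致,
    # 区别仅在于 np.histogram 的最后一个区间右端为闭区间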
hist_values = [np.sum((data >= bins[i]) & (data < bins[i + 1]))
for i in range(len(bins) - 1)]
# 设置画布大小
plt.figure(figsize=(10, 6))
# 绘制直方图
plt.bar(bins[:-1], hist_values, width=(bins[1] - bins[0]))
# 添加标题和坐标轴标签
plt.title('皮尔逊相关系数分布图')
plt.xlabel('区间')
plt.ylabel('统计数')
plt.savefig(os.path.join(config.dataset, '皮尔逊相关性系数.png'))
plt.close()
# 设置画布大小
plt.figure(figsize=(10, 6))
data = correlation_df['Spearman_Correlation'].values.tolist()
# 计算每个区间的统计数(这里是区间内数据的数量)
hist_values = [np.sum((data >= bins[i]) & (data < bins[i + 1]))
for i in range(len(bins) - 1)]
# 绘制直方图
plt.bar(bins[:-1], hist_values, width=(bins[1] - bins[0]))
# 添加标题和坐标轴标签
plt.title('斯皮尔曼相关系数分布图')
plt.xlabel('区间')
plt.ylabel('统计数')
plt.savefig(os.path.join(config.dataset, '斯皮尔曼相关性系数.png'))
plt.close()
content.append(Graphs.draw_text(f'指标相关性分析--皮尔逊相关系数:'))
# 皮尔逊正相关 不相关 负相关 的表格
content.append(Graphs.draw_img(os.path.join(config.dataset, '皮尔逊相关性系数.png')))
content.append(Graphs.draw_text('''皮尔逊相关系数说明:'''))
content.append(Graphs.draw_text('''衡量两个特征之间的线性相关性。'''))
content.append(Graphs.draw_text('''
相关系数为1表示两个变量之间存在完全正向的线性关系即当一个变量增加时另一个变量也相应增加且变化是完全一致的'''))
content.append(Graphs.draw_text('''当前特征中正相关前十的有:'''))
top10_columns = correlation_df.sort_values(
by='Pearson_Correlation', ascending=False).head(10)['Feature'].to_list()
top10 = ','.join(top10_columns)
content.append(Graphs.draw_text(f'''{top10}'''))
# 获取特征的近一月值
feature_data_df = pd.read_csv(os.path.join(
config.dataset,'填充后的特征数据.csv'), parse_dates=['ds']).tail(20)
feature_df = feature_data_df[['ds', 'y']+top10_columns]
    # 遍历每个特征列,与 y 绘制双轴对比图
for i, col in enumerate(feature_df.columns):
print(f'正在绘制第{i+1}个特征{col}与价格散点图...')
if col not in ['ds', 'y']:
fig, ax1 = plt.subplots(figsize=(10, 6))
# 在第一个坐标轴上绘制数据
ax1.plot(feature_df['ds'], feature_df['y'], 'b-')
ax1.set_xlabel('日期')
ax1.set_ylabel('y', color='b')
ax1.tick_params('y', colors='b')
# 在 ax1 上添加文本显示值,添加一定的偏移避免值与曲线重叠
for j in range(1, len(feature_df), 2):
value = feature_df['y'].iloc[j]
date = feature_df['ds'].iloc[j]
offset = 1.001
ax1.text(date, value * offset, str(round(value, 2)),
ha='center', va='bottom', color='b', fontsize=10)
# 创建第二个坐标轴
ax2 = ax1.twinx()
# 在第二个坐标轴上绘制数据
line2 = ax2.plot(feature_df['ds'], feature_df[col], 'r-')
ax2.set_ylabel(col, color='r')
ax2.tick_params('y', colors='r')
# 在 ax2 上添加文本显示值,添加一定的偏移避免值与曲线重叠
for j in range(0, len(feature_df), 2):
value = feature_df[col].iloc[j]
date = feature_df['ds'].iloc[j]
offset = 1.001
ax2.text(date, value * offset, str(round(value, 2)),
ha='center', va='bottom', color='r', fontsize=10)
# 添加标题
plt.title(col)
# 设置横坐标为日期格式并自动调整
locator = mdates.AutoDateLocator()
formatter = mdates.AutoDateFormatter(locator)
ax1.xaxis.set_major_locator(locator)
ax1.xaxis.set_major_formatter(formatter)
# 文件名特殊字符处理
col = col.replace('*', '-')
col = col.replace(':', '-')
plt.savefig(os.path.join(config.dataset, f'{col}与价格散点图.png'))
content.append(Graphs.draw_img(
os.path.join(config.dataset, f'{col}与价格散点图.png')))
plt.close()
content.append(Graphs.draw_text(
'''相关系数为-1表示两个变量之间存在完全负向的线性关系即当一个变量增加时另一个变量会相应减少且变化是完全相反的'''))
content.append(Graphs.draw_text('''当前特征中负相关前十的有:'''))
tail10_columns = correlation_df.sort_values(
by='Pearson_Correlation', ascending=True).head(10)['Feature'].to_list()
top10 = ','.join(tail10_columns)
content.append(Graphs.draw_text(f'''{top10}'''))
    # 获取特征的近一月值沿用上文的最近20条数据
feature_df = feature_data_df[['ds', 'y']+tail10_columns]
    # 遍历每个特征列,与 y 绘制双轴对比图
for i, col in enumerate(feature_df.columns):
print(f'正在绘制第{i+1}个特征{col}与价格散点图...')
if col not in ['ds', 'y']:
fig, ax1 = plt.subplots(figsize=(10, 6))
# 在第一个坐标轴上绘制数据
ax1.plot(feature_df['ds'], feature_df['y'], 'b-')
ax1.set_xlabel('日期')
ax1.set_ylabel('y', color='b')
ax1.tick_params('y', colors='b')
# 在 ax1 上添加文本显示值,添加一定的偏移避免值与曲线重叠
            for j in range(1, len(feature_df), 2):
                value = feature_df['y'].iloc[j]
                date = feature_df['ds'].iloc[j]
                offset = 1.001
                ax1.text(date, value * offset, str(round(value, 2)),
                         ha='center', va='bottom', color='b', fontsize=10)
# 创建第二个坐标轴
ax2 = ax1.twinx()
# 在第二个坐标轴上绘制数据
line2 = ax2.plot(feature_df['ds'], feature_df[col], 'r-')
ax2.set_ylabel(col, color='r')
ax2.tick_params('y', colors='r')
# 在 ax2 上添加文本显示值,添加一定的偏移避免值与曲线重叠
for j in range(1, len(feature_df), 2):
value = feature_df[col].iloc[j]
date = feature_df['ds'].iloc[j]
offset = 1.001
ax2.text(date, value * offset, str(round(value, 2)),
ha='center', va='bottom', color='r', fontsize=10)
# 添加标题
plt.title(col)
# 设置横坐标为日期格式并自动调整
locator = mdates.AutoDateLocator()
formatter = mdates.AutoDateFormatter(locator)
ax1.xaxis.set_major_locator(locator)
ax1.xaxis.set_major_formatter(formatter)
# 文件名特殊字符处理
col = col.replace('*', '-')
col = col.replace(':', '-')
plt.savefig(os.path.join(config.dataset, f'{col}与价格散点图.png'))
content.append(Graphs.draw_img(
os.path.join(config.dataset, f'{col}与价格散点图.png')))
plt.close()
content.append(Graphs.draw_text(
'''相关系数接近0表示两个变量之间不存在线性关系即它们的变化不会随着对方的变化而变化。'''))
content.append(Graphs.draw_text(f'指标相关性分析--斯皮尔曼相关系数:'))
# 皮尔逊正相关 不相关 负相关 的表格
content.append(Graphs.draw_img(os.path.join(config.dataset, '斯皮尔曼相关性系数.png')))
    content.append(Graphs.draw_text(
        "斯皮尔曼相关系数Spearman's rank correlation coefficient是一种用于衡量两个变量之间的单调关系不一定是线性关系的统计指标。"))
content.append(Graphs.draw_text('它的计算基于变量的秩次(即变量值的排序位置)而非变量的原始值。'))
content.append(Graphs.draw_text('斯皮尔曼相关系数的取值范围在 -1 到 1 之间。'))
content.append(Graphs.draw_text('当系数为 1 时,表示两个变量之间存在完全正的单调关系;'))
content.append(Graphs.draw_text('''当前特征中正单调关系前十的有:'''))
top10 = ','.join(correlation_df.sort_values(
by='Spearman_Correlation', ascending=False).head(10)['Feature'])
content.append(Graphs.draw_text(f'''{top10}'''))
content.append(Graphs.draw_text('当系数为 -1 时,表示存在完全负的单调关系;'))
content.append(Graphs.draw_text('''当前特征中负单调关系前十的有:'''))
top10 = ','.join(correlation_df.sort_values(
by='Spearman_Correlation', ascending=True).head(10)['Feature'])
content.append(Graphs.draw_text(f'''{top10}'''))
content.append(Graphs.draw_text('当系数为 0 时,表示两个变量之间不存在单调关系。'))
content.append(Graphs.draw_text(
'与皮尔逊相关系数相比,斯皮尔曼相关系数对于数据中的异常值不敏感,更适用于处理非线性关系或存在极端值的数据。'))
content.append(Graphs.draw_little_title('模型选择:'))
content.append(Graphs.draw_text(
f'预测使用了{num_models}个模型进行训练拟合通过评估指标MAE从小到大排列前5个模型的简介如下'))
# 读取模型简介
with open(os.path.join(config.dataset, 'model_introduction.txt'), 'r', encoding='utf-8') as f:
for line in f:
line_split = line.strip().split('--')
if line_split[0] in fivemodels_list:
for introduction in line_split:
content.append(Graphs.draw_text(introduction))
content.append(Graphs.draw_little_title('模型评估:'))
    eval_df = pd.read_csv(os.path.join(
        config.dataset, 'model_evaluation.csv'), encoding='utf-8')
    # 将 eval_df 的数值列转为 float 并保留三位小数
    for col in eval_df.columns:
if col not in ['模型(Model)']:
eval_df[col] = eval_df[col].astype(float)
eval_df[col] = eval_df[col].round(3)
# 筛选 fivemodels_list.tolist() 的行
eval_df = eval_df[eval_df['模型(Model)'].isin(fivemodels_list)]
# df转置
eval_df = eval_df.T
# df重置索引
eval_df = eval_df.reset_index()
eval_df = eval_df.T
# # 添加表格
data = eval_df.values.tolist()
col_width = 500/len(eval_df.columns)
content.append(Graphs.draw_table(col_width, *data))
content.append(Graphs.draw_text('评估指标释义:'))
content.append(Graphs.draw_text(
'1. 均方根误差(RMSE):均方根误差是衡量预测值与实际值之间误差的一种方法,取值越小,误差越小,预测效果越好。'))
content.append(Graphs.draw_text(
'2. 平均绝对误差(MAE):平均绝对误差是衡量预测值与实际值之间误差的一种方法,取值越小,误差越小,预测效果越好。'))
content.append(Graphs.draw_text(
'3. 平均平方误差(MSE):平均平方误差是衡量预测值与实际值之间误差的一种方法,取值越小,误差越小,预测效果越好。'))
content.append(Graphs.draw_text('模型拟合:'))
# 添加图片
content.append(Graphs.draw_img(os.path.join(config.dataset, '预测值与真实值对比图.png')))
# 附1特征列表
content.append(Graphs.draw_little_title('附1、特征列表'))
df_fuyi = pd.read_csv(os.path.join(
config.dataset,'特征频度统计.csv'), encoding='utf-8')
for col in df_fuyi.columns:
fuyi = df_fuyi[col]
fuyi = fuyi.dropna()
content.append(Graphs.draw_text(f'{col}'))
for i in range(len(fuyi)):
content.append(Graphs.draw_text(f'{i+1}{fuyi[i]}'))
# 生成pdf文件
doc = SimpleDocTemplate(os.path.join(config.dataset, reportname), pagesize=letter)
doc.build(content)
# pdf 上传到数字化信息平台
# 读取pdf并转为base64
try:
        if config.is_update_report:
            with open(os.path.join(config.dataset, reportname), 'rb') as f:
                base64_data = base64.b64encode(f.read()).decode('utf-8')
            global_config['upload_data']["data"]["fileBase64"] = base64_data
            global_config['upload_data']["data"]["fileName"] = reportname
            token = get_head_auth_report()
            upload_report_data(token, config.upload_data)
except TimeoutError as e:
print(f"请求超时: {e}")
2025-03-05 09:47:02 +08:00
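# 附:相关系数计算的向量化最小示例(仅作说明,非本模块正式接口;函数名
# _demo_correlation_table 为假设,且假设除 y 外各列均为数值型)。上文逐列
# 循环的皮尔逊/斯皮尔曼相关系数可用 pandas 的 corrwith 一次性计算:
def _demo_correlation_table(data):
    """返回各特征与 y 的皮尔逊、斯皮尔曼相关系数表。"""
    features = data.drop(columns=['y'])
    pearson = features.corrwith(data['y']).round(3)
    spearman = features.corrwith(data['y'], method='spearman').round(2)
    return pd.DataFrame({'Feature': pearson.index,
                         'Pearson_Correlation': pearson.values,
                         'Spearman_Correlation': spearman.reindex(pearson.index).values})

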
@exception_logger
def tansuanli_export_pdf(num_indicators=475, num_models=22, num_dayindicator=202, inputsize=5, dataset='dataset', y='电碳价格', end_time='2024-07-30', reportname='tansuanli.pdf'):
# 创建内容对应的空列表
content = list()
# 添加标题
2024-11-01 16:38:21 +08:00
content.append(Graphs.draw_title(f'{y}{end_time}预测报告'))
# 预测结果
2024-11-01 16:38:21 +08:00
content.append(Graphs.draw_little_title('一、预测结果:'))
content.append(Graphs.draw_img(os.path.join(config.dataset, '历史价格-预测值.png')))
    # 读取预测结果、真实值与模型评估数据
    from lib.dataread import loadcsv
    df = loadcsv(os.path.join(config.dataset, 'predict.csv'))
    df_true = loadcsv(os.path.join(config.dataset, '指标数据添加时间特征.csv'))  # 获取预测日期对应的真实值
    df_true = df_true[['ds', 'y']]
    eval_df = loadcsv(os.path.join(config.dataset, 'model_evaluation.csv'))
# 按评估指标排序,取前五
fivemodels_list = eval_df['模型(Model)'].values[:5] # 列表形式,后面当作列名索引使用
# 取 fivemodels_list 和 ds 列
df = df[['ds'] + fivemodels_list.tolist()]
# 拼接预测日期对应的真实值
df = pd.merge(df, df_true, on='ds', how='left')
# 删除全部为nan的列
df = df.dropna(how='all', axis=1)
# 选择除 'ds' 列外的数值列,并进行类型转换和四舍五入
num_cols = [col for col in df.columns if col !=
'ds' and pd.api.types.is_numeric_dtype(df[col])]
for col in num_cols:
df[col] = df[col].astype(float).round(2)
# 添加预测每日的最大值、最小值、平均值三列
df['平均值'] = df[num_cols].mean(axis=1).round(2)
df['最大值'] = df[num_cols].max(axis=1)
df['最小值'] = df[num_cols].min(axis=1)
# 添加模型预测周期内的最大值、最小值、平均值三行
# 计算列的统计值
mean_values = df[num_cols].mean(axis=0).round(2)
max_values = df[num_cols].max(axis=0)
min_values = df[num_cols].min(axis=0)
# 创建一个新的 DataFrame 来存储统计行
stats_row = pd.DataFrame(
[mean_values, max_values, min_values], index=[0, 1, 2])
stats_row['ds'] = ['平均值', '最大值', '最小值']
# 将统计行添加到原始 DataFrame
df = pd.concat([df, stats_row], axis=0)
# df替换nan 为 '--'
df = df.fillna('--')
# df转置
df = df.T
# df重置索引
df = df.reset_index()
# 添加预测值表格
data = df.values.tolist()
col_width = 500/len(df.columns)
content.append(Graphs.draw_table(col_width, *data))
content.append(Graphs.draw_little_title('二、上一预测周期偏差率分析:'))
df = loadcsv(os.path.join(config.dataset, 'testandpredict_groupby.csv'))
df4 = df.copy() # 计算偏差率使用
# 计算模型偏差率
# 计算各列对于y列的差值百分比
df3 = pd.DataFrame() # 存储偏差率
# 删除有null的行
df4 = df4.dropna()
df3['ds'] = df4['ds']
for col in df.columns:
if col not in ['y', 'ds', 'index']:
df3[col] = round(abs(df4[col] - df4['y']) / df4['y'] * 100, 2)
    # 取评估MAE前五模型的偏差率保留最近 inputsize 天
df3 = df3[['ds']+fivemodels_list.tolist()][-inputsize:]
# 找出上一预测区间的时间
stime = df3['ds'].iloc[0]
etime = df3['ds'].iloc[-1]
# 添加偏差率表格
    fivemodels = '、'.join(eval_df['模型(Model)'].values[:5])  # 字符串形式,后面写入字符串使用
content.append(Graphs.draw_text(
f'预测使用了{num_models}个模型进行训练使用评估结果MAE前五的模型分别是 {fivemodels} ,模型上一预测区间 {stime} -- {etime}的偏差率(%)分别是:'))
# # 添加偏差率表格
df3 = df3.T
df3 = df3.reset_index()
df3 = df3.T
data = df3.values.tolist()
col_width = 500/len(df3.columns)
content.append(Graphs.draw_table(col_width, *data))
content.append(Graphs.draw_little_title('三、预测过程解析:'))
# 特征、模型、参数配置
content.append(Graphs.draw_text(
        f'本次预测使用了给定的28个指标列名重复的排除后作为特征应用了专门面向时间序列预测的NeuralForecast库中的{num_models}个模型。'))
content.append(Graphs.draw_text(f'使用10天的数据预测未来{inputsize}天的数据。'))
content.append(Graphs.draw_little_title('指标情况:'))
content.append(Graphs.draw_text(' 指标频度包括'))
# 添加频度统计表格
pindu_df = loadcsv(os.path.join(config.dataset, '特征频度统计.csv'))
pindu_df.fillna('-', inplace=True)
pindu_df = pindu_df.T
pindu_df = pindu_df.reset_index()
pindu_df = pindu_df.T
data = pindu_df.values.tolist()
col_width = 500/len(pindu_df.columns)
content.append(Graphs.draw_table(col_width, *data))
content.append(Graphs.draw_text(
f'从指标特征的频度信息来看,月度指标占比最高,而我们需要进行预测的指标为日度的,所以本数据集中月度和周度指标需要进行插值处理。'))
content.append(Graphs.draw_text(' 数据特征工程:'))
content.append(Graphs.draw_text('1. 数据日期排序,新日期在最后'))
content.append(Graphs.draw_text('2. 删除空列,特征数据列没有值,就删除'))
content.append(Graphs.draw_text('3. 周度、月度特征填充为日度数据,填充规则:'))
content.append(Graphs.draw_text(
' -- 向后填充,举例:假设周五出现一个周度指标数据,那么在这之前的数据用上周五的数据'))
content.append(Graphs.draw_text(
' -- 向前填充举例采集数据开始日期为2018年1月1日那么周度数据可能是2018年1月3日那么3日的数据向前填充使1日2日都有数值'))
content.append(Graphs.draw_text(f'以上处理其实并不合理,但结合我们想要的结果,我们选择了这种处理方式。'))
content.append(Graphs.draw_text(
f'一般来讲,指标数据的频度和预测列是一致的,我们可以考虑预测月度的目标列,不过这样的话,月度数据太少了,不足以用来训练模型。'))
# 特征工程
# 预测列分析
content.append(Graphs.draw_text(' 电碳价格自相关ACF和偏自相关PACF分析'))
content.append(Graphs.draw_img(os.path.join(config.dataset, '指标数据自相关图.png')))
content.append(Graphs.draw_img(os.path.join(config.dataset, '指标数据偏自相关图.png')))
content.append(Graphs.draw_text(' 解读:'))
content.append(Graphs.draw_text(
' 自相关函数的取值范围为 [-1, 1]。正值表示信号在不同时间点之间具有正相关性,负值表示信号具有负相关性,而 0 表示信号在不同时间点之间不相关。 '))
content.append(Graphs.draw_text(
' 偏自相关函数PACF则是在控制了中间的滞后项影响后特定滞后项与当前项的相关性。 '))
content.append(Graphs.draw_text(
' 当前目标列表现出的 ACF 呈现出拖尾的特征,而 PACF 在1个滞后阶数后截尾这说明目标值适合使用自回归AR模型 '))
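    # 备注ACF/PACF 图通常可由 statsmodels 绘制(示例,假设已安装
    # statsmodels其中 series 为目标价格序列,滞后阶数 lags=30 为假设值):
    # from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
    # plot_acf(series, lags=30)
    # plot_pacf(series, lags=30)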
content.append(Graphs.draw_text(' 数据特征可视化分析:'))
# 找出所有后缀为散点图.png的文件
import glob
scatter_files = glob.glob(os.path.join(config.dataset, '*散点图.png'))
for file in scatter_files:
content.append(Graphs.draw_img(file))
content.append(Graphs.draw_text(' 解读:'))
content.append(Graphs.draw_text(
' 观察特征与目标列的散点图,我们可以直观的感受到特征与我们要预测的列没有明显的趋势相关,需要考虑选取的特征合理。 '))
content.append(Graphs.draw_text(' 数据特征相关性分析:'))
# 计算特征相关性
# 读取数据
from scipy.stats import spearmanr
data = loadcsv(os.path.join(config.dataset, '指标数据添加时间特征.csv'))
# 重命名预测列
data.rename(columns={y: 'y'}, inplace=True) # 修改
from lib.tools import dateConvert
data = dateConvert(data) # 修改
# 去掉ds列
data.drop(columns=['ds'], inplace=True)
# 创建一个空的 DataFrame 来保存相关系数
correlation_df = pd.DataFrame(columns=['Feature', 'Correlation'])
# 计算各特征与目标列的皮尔逊相关系数,并保存到新的 DataFrame 中
for col in data.columns:
if col != 'y':
pearson_correlation = np.corrcoef(data[col], data['y'])[0, 1]
spearman_correlation, _ = spearmanr(data[col], data['y'])
new_row = {'Feature': col, 'Pearson_Correlation': round(
pearson_correlation, 3), 'Spearman_Correlation': round(spearman_correlation, 2)}
            correlation_df = pd.concat(
                [correlation_df, pd.DataFrame([new_row])], ignore_index=True)  # 用公开 API 替代私有方法 _append
# 删除空列
correlation_df.drop('Correlation', axis=1, inplace=True)
correlation_df.dropna(inplace=True)
correlation_df.to_csv(os.path.join(config.dataset, '指标相关性分析.csv'), index=False)
data = correlation_df['Pearson_Correlation'].values.tolist()
# 生成 -1 到 1 的 20 个区间
bins = np.linspace(-1, 1, 21)
# 计算每个区间的统计数(这里是区间内数据的数量)
hist_values = [np.sum((data >= bins[i]) & (data < bins[i + 1]))
for i in range(len(bins) - 1)]
# 设置画布大小
plt.figure(figsize=(10, 6))
# 绘制直方图
plt.bar(bins[:-1], hist_values, width=(bins[1] - bins[0]))
# 添加标题和坐标轴标签
plt.title('皮尔逊相关系数分布图')
plt.xlabel('区间')
plt.ylabel('统计数')
plt.savefig(os.path.join(config.dataset, '皮尔逊相关性系数.png'))
plt.close()
# 设置画布大小
plt.figure(figsize=(10, 6))
data = correlation_df['Spearman_Correlation'].values.tolist()
# 计算每个区间的统计数(这里是区间内数据的数量)
hist_values = [np.sum((data >= bins[i]) & (data < bins[i + 1]))
for i in range(len(bins) - 1)]
# 绘制直方图
plt.bar(bins[:-1], hist_values, width=(bins[1] - bins[0]))
# 添加标题和坐标轴标签
plt.title('斯皮尔曼相关系数分布图')
plt.xlabel('区间')
plt.ylabel('统计数')
plt.savefig(os.path.join(config.dataset, '斯皮尔曼相关性系数.png'))
plt.close()
content.append(Graphs.draw_text(f'指标相关性分析--皮尔逊相关系数:'))
# 皮尔逊正相关 不相关 负相关 的表格
content.append(Graphs.draw_img(os.path.join(config.dataset, '皮尔逊相关性系数.png')))
content.append(Graphs.draw_text('''皮尔逊相关系数说明:'''))
content.append(Graphs.draw_text('''衡量两个特征之间的线性相关性。'''))
content.append(Graphs.draw_text('''
相关系数为1表示两个变量之间存在完全正向的线性关系即当一个变量增加时另一个变量也相应增加且变化是完全一致的'''))
content.append(Graphs.draw_text('''当前特征中正相关前十的有:'''))
top10 = ','.join(correlation_df.sort_values(
by='Pearson_Correlation', ascending=False).head(10)['Feature'])
content.append(Graphs.draw_text(f'''{top10}'''))
content.append(Graphs.draw_text(
'''相关系数为-1表示两个变量之间存在完全负向的线性关系即当一个变量增加时另一个变量会相应减少且变化是完全相反的'''))
content.append(Graphs.draw_text('''当前特征中负相关前十的有:'''))
top10 = ','.join(correlation_df.sort_values(
by='Pearson_Correlation', ascending=True).head(10)['Feature'])
content.append(Graphs.draw_text(f'''{top10}'''))
content.append(Graphs.draw_text(
'''相关系数接近0表示两个变量之间不存在线性关系即它们的变化不会随着对方的变化而变化。'''))
content.append(Graphs.draw_text(f'指标相关性分析--斯皮尔曼相关系数:'))
# 皮尔逊正相关 不相关 负相关 的表格
content.append(Graphs.draw_img(os.path.join(config.dataset, '斯皮尔曼相关性系数.png')))
    content.append(Graphs.draw_text(
        "斯皮尔曼相关系数Spearman's rank correlation coefficient是一种用于衡量两个变量之间的单调关系不一定是线性关系的统计指标。"))
content.append(Graphs.draw_text('它的计算基于变量的秩次(即变量值的排序位置)而非变量的原始值。'))
content.append(Graphs.draw_text('斯皮尔曼相关系数的取值范围在 -1 到 1 之间。'))
content.append(Graphs.draw_text('当系数为 1 时,表示两个变量之间存在完全正的单调关系;'))
content.append(Graphs.draw_text('''当前特征中正单调关系前十的有:'''))
top10 = ','.join(correlation_df.sort_values(
by='Spearman_Correlation', ascending=False).head(10)['Feature'])
content.append(Graphs.draw_text(f'''{top10}'''))
content.append(Graphs.draw_text('当系数为 -1 时,表示存在完全负的单调关系;'))
content.append(Graphs.draw_text('''当前特征中负单调关系前十的有:'''))
top10 = ','.join(correlation_df.sort_values(
by='Spearman_Correlation', ascending=True).head(10)['Feature'])
content.append(Graphs.draw_text(f'''{top10}'''))
content.append(Graphs.draw_text('当系数为 0 时,表示两个变量之间不存在单调关系。'))
content.append(Graphs.draw_text(
'与皮尔逊相关系数相比,斯皮尔曼相关系数对于数据中的异常值不敏感,更适用于处理非线性关系或存在极端值的数据。'))
content.append(Graphs.draw_little_title('模型选择:'))
content.append(Graphs.draw_text(f'预测使用了{num_models}个模型进行训练拟合,模型的简介如下:'))
# 读取模型简介
with open(os.path.join(config.dataset, 'model_introduction.txt'), 'r', encoding='utf-8') as f:
for line in f:
line_split = line.strip().split('--')
# if line_split[0] in fivemodels_list:
for introduction in line_split:
content.append(Graphs.draw_text(introduction))
content.append(Graphs.draw_little_title('模型评估:'))
content.append(Graphs.draw_text(f'通过评估指标MAE从小到大排列前5个模型的评估详情如下'))
    eval_df = loadcsv(os.path.join(config.dataset, 'model_evaluation.csv'))
    # 将 eval_df 的数值列转为 float 并保留三位小数
    for col in eval_df.columns:
if col not in ['模型(Model)']:
eval_df[col] = eval_df[col].astype(float)
eval_df[col] = eval_df[col].round(3)
# 筛选 fivemodels_list.tolist() 的行
eval_df = eval_df[eval_df['模型(Model)'].isin(fivemodels_list)]
# df转置
eval_df = eval_df.T
# df重置索引
eval_df = eval_df.reset_index()
eval_df = eval_df.T
# # 添加表格
data = eval_df.values.tolist()
col_width = 500/len(eval_df.columns)
content.append(Graphs.draw_table(col_width, *data))
content.append(Graphs.draw_text('评估指标释义:'))
content.append(Graphs.draw_text(
'1. 均方根误差(RMSE):均方根误差是衡量预测值与实际值之间误差的一种方法,先计算预测值与真实值的差值的平方,然后对这些平方差求平均值,最后取平均值的平方根。取值越小,误差越小,预测效果越好。'))
content.append(Graphs.draw_text(
'2. 平均绝对误差(MAE):平均绝对误差是衡量预测值与实际值之间误差的一种方法,对预测值与真实值之间差值的绝对值进行求和,然后除以样本数量。取值越小,误差越小,预测效果越好。'))
content.append(Graphs.draw_text(
'3. 平均平方误差(MSE):平均平方误差是衡量预测值与实际值之间误差的一种方法,先计算预测值与真实值之差的平方,然后对这些平方差求平均值。取值越小,误差越小,预测效果越好。'))
content.append(Graphs.draw_text('模型拟合:'))
# 添加图片
content.append(Graphs.draw_img(os.path.join(config.dataset, '预测值与真实值对比图.png')))
# 生成pdf文件
doc = SimpleDocTemplate(os.path.join(config.dataset, reportname), pagesize=letter)
doc.build(content)
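

# 附:报告组装流程的最小示例(仅作说明,非本模块正式接口;函数名与
# 输出文件名均为假设。上文各报告函数的共同套路是把段落、图片、表格等
# Flowable 依次放入 content 列表,最后交给 SimpleDocTemplate.build 生成 PDF。
# 这里用英文示例文本以避开中文字体配置正式报告需使用已注册的SimSun字体。
def _demo_build_pdf(path='demo_report.pdf'):
    styles = getSampleStyleSheet()
    content = [
        Paragraph('Demo Title', styles['Title']),
        Paragraph('Demo paragraph.', styles['Normal']),
        Table([['ds', 'model_a'], ['2024-07-30', '100.0']]),
    ]
    SimpleDocTemplate(path, pagesize=letter).build(content)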