聚烯烃测试环境调通
This commit is contained in:
parent
7244394871
commit
a6de32a809
@ -105,6 +105,7 @@ modelsindex = {
|
||||
'DeepNPTS':'SELF0000076'
|
||||
}
|
||||
|
||||
|
||||
# eta 上传预测结果的请求体,后面发起请求的时候更改 model datalist 数据
|
||||
data = {
|
||||
"IndexCode": "",
|
||||
@ -125,58 +126,25 @@ data = {
|
||||
# level:3才可以获取到数据,所以需要人工把能源化工下所有的level3级都找到
|
||||
# url = 'http://10.189.2.78:8108/v1/edb/list?ClassifyId=1214'
|
||||
#ParentId ":1160, 能源化工
|
||||
# ClassifyId ":1214,原油 ,1161 PP
|
||||
# ClassifyId ":1214,原油
|
||||
#ParentId ":1214,",就是原油下所有的数据。
|
||||
ClassifyId = 1161
|
||||
|
||||
|
||||
### 报告上传配置
|
||||
# 变量定义--线上环境
|
||||
# login_pushreport_url = "http://10.200.32.39/jingbo-api/api/server/login"
|
||||
# upload_url = "http://10.200.32.39/jingbo-api/api/dw/dataValue/pushDataValueList"
|
||||
|
||||
############################################################################################################### 变量定义--测试环境
|
||||
server_host = '192.168.100.53'
|
||||
|
||||
# login_data = {
|
||||
# "data": {
|
||||
# "account": "api_dev",
|
||||
# "password": "ZTEwYWRjMzk0OWJhNTlhYmJlNTZlMDU3ZjIwZjg4M2U=",
|
||||
# "tenantHashCode": "8a4577dbd919675758d57999a1e891fe",
|
||||
# "terminal": "API"
|
||||
# },
|
||||
# "funcModule": "API",
|
||||
# "funcOperation": "获取token"
|
||||
# }
|
||||
|
||||
|
||||
|
||||
# upload_data = {
|
||||
# "funcModule":'研究报告信息',
|
||||
# "funcOperation":'上传原油价格预测报告',
|
||||
# "data":{
|
||||
# "ownerAccount":'27663', #报告所属用户账号 27663 - 刘小朋
|
||||
# "reportType":'OIL_PRICE_FORECAST', # 报告类型,固定为OIL_PRICE_FORECAST
|
||||
# "fileName": '', #文件名称
|
||||
# "fileBase64": '' ,#文件内容base64
|
||||
# "categoryNo":'yyjgycbg', # 研究报告分类编码
|
||||
# "smartBusinessClassCode":'YCJGYCBG', #分析报告分类编码
|
||||
# "reportEmployeeCode":"E40482" ,# 报告人 E40482 - 管理员 0000027663 - 刘小朋
|
||||
# "reportDeptCode" :"002000621000", # 报告部门 - 002000621000 SH期货研究部
|
||||
# "productGroupCode":"RAW_MATERIAL" # 商品分类
|
||||
# }
|
||||
# }
|
||||
|
||||
|
||||
|
||||
# # 变量定义--测试环境
|
||||
login_pushreport_url = "http://192.168.100.53:8080/jingbo-dev/api/server/login"
|
||||
upload_url = "http://192.168.100.53:8080/jingbo-dev/api/analysis/reportInfo/researchUploadReportSave"
|
||||
# upload_url = "http://192.168.100.109:8080/jingbo/api/analysis/reportInfo/researchUploadReportSave" # zhaoqiwei
|
||||
|
||||
login_pushreport_url = f"http://{server_host}:8080/jingbo-dev/api/server/login"
|
||||
upload_url = f"http://{server_host}:8080/jingbo-dev/api/analysis/reportInfo/researchUploadReportSave"
|
||||
upload_warning_url = f"http://{server_host}:8080/jingbo-dev/api/basicBuiness/crudeOilWarning/save"
|
||||
query_data_list_item_nos_url = f"http://{server_host}:8080/jingbo-dev/api/warehouse/dwDataItem/queryDataListItemNos"
|
||||
|
||||
login_data = {
|
||||
"data": {
|
||||
"account": "api_test",
|
||||
"password": "MmVmNzNlOWI0MmY0ZDdjZGUwNzE3ZjFiMDJiZDZjZWU=",
|
||||
# "password": "MmVmNzNlOWI0MmY0ZDdjZGUwNzE3ZjFiMDJiZDZjZWU=", # Shihua@123456
|
||||
"password": "ZTEwYWRjMzk0OWJhNTlhYmJlNTZlMDU3ZjIwZjg4M2U=", # 123456
|
||||
"tenantHashCode": "8a4577dbd919675758d57999a1e891fe",
|
||||
"terminal": "API"
|
||||
},
|
||||
@ -201,40 +169,61 @@ upload_data = {
|
||||
}
|
||||
|
||||
|
||||
### 线上开关
|
||||
# is_train = True # 是否训练
|
||||
# is_debug = False # 是否调试
|
||||
# is_eta = True # 是否使用eta接口
|
||||
# is_timefurture = True # 是否使用时间特征
|
||||
# is_fivemodels = False # 是否使用之前保存的最佳的5个模型
|
||||
# is_edbcode = False # 特征使用edbcoding列表中的
|
||||
# is_edbnamelist = False # 自定义特征,对应上面的edbnamelist
|
||||
# is_update_report = True # 是否上传报告
|
||||
# is_del_corr = 0 # 是否删除相关性高的特征,取值为 0-1 ,0 为不删除,0.6 表示删除相关性小于0.6的特征
|
||||
warning_data = {
|
||||
"funcModule":'原油特征停更预警',
|
||||
"funcOperation":'原油特征停更预警',
|
||||
"data":{
|
||||
'WARNING_TYPE_NAME':'特征数据停更预警',
|
||||
'WARNING_CONTENT':'',
|
||||
'WARNING_DATE':''
|
||||
}
|
||||
}
|
||||
|
||||
query_data_list_item_nos_data = {
|
||||
"funcModule": "数据项",
|
||||
"funcOperation": "查询",
|
||||
"data": {
|
||||
"dateStart":"20200101",
|
||||
"dateEnd":"20241231",
|
||||
"dataItemNoList":["Brentzdj","Brentzgj"] # 数据项编码,代表 brent最低价和最高价
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# 北京环境数据库
|
||||
host = '192.168.101.27'
|
||||
port = 3306
|
||||
dbusername ='root'
|
||||
password = '123456'
|
||||
dbname = 'jingbo_test'
|
||||
table_name = 'v_tbl_crude_oil_warning'
|
||||
|
||||
|
||||
### 开关
|
||||
is_train = False # 是否训练
|
||||
is_debug = False # 是否调试
|
||||
is_eta = False # 是否使用eta接口
|
||||
is_market = False # 是否通过市场信息平台获取特征 ,在is_eta 为true 的情况下生效
|
||||
is_timefurture = True # 是否使用时间特征
|
||||
is_fivemodels = False # 是否使用之前保存的最佳的5个模型
|
||||
is_edbcode = False # 特征使用edbcoding列表中的
|
||||
is_edbnamelist = False # 自定义特征,对应上面的edbnamelist
|
||||
is_update_eta = False # 预测结果上传到eta
|
||||
is_update_report = False # 是否上传报告
|
||||
is_del_corr = 0 # 是否删除相关性高的特征,取值为 0-1 ,0 为不删除,0.6 表示删除相关性小于0.6的特征
|
||||
is_update_warning_data = False # 是否上传预警数据
|
||||
is_del_corr = 0.6 # 是否删除相关性高的特征,取值为 0-1 ,0 为不删除,0.6 表示删除相关性小于0.6的特征
|
||||
is_del_tow_month = True # 是否删除两个月不更新的特征
|
||||
|
||||
|
||||
|
||||
# 连接到数据库
|
||||
# db_mysql = MySQLDB(host=host, user=dbusername, password=password, database=dbname)
|
||||
# db_mysql.connect()
|
||||
# print("数据库连接成功",host,dbname,dbusername)
|
||||
db_mysql = MySQLDB(host=host, user=dbusername, password=password, database=dbname)
|
||||
db_mysql.connect()
|
||||
print("数据库连接成功",host,dbname,dbusername)
|
||||
|
||||
|
||||
# 数据截取日期
|
||||
start_year = 2017 # 数据开始年份
|
||||
start_year = 2020 # 数据开始年份
|
||||
end_time = '' # 数据截取日期
|
||||
freq = 'B' # 时间频率,"D": 天 "W": 周"M": 月"Q": 季度"A": 年 "H": 小时 "T": 分钟 "S": 秒 "B": 工作日
|
||||
delweekenday = True if freq == 'B' else False # 是否删除周末数据
|
||||
@ -242,9 +231,9 @@ is_corr = False # 特征是否参与滞后领先提升相关系数
|
||||
add_kdj = False # 是否添加kdj指标
|
||||
if add_kdj and is_edbnamelist:
|
||||
edbnamelist = edbnamelist+['K','D','J']
|
||||
|
||||
### 模型参数
|
||||
# y = 'PP:拉丝:1102K:市场价:青州:国家能源宁煤(日)' # 原油指标数据的目标变量
|
||||
y = 'AVG-金能大唐久泰青州' # 原油指标数据的目标变量
|
||||
y = 'AVG-金能大唐久泰青州'
|
||||
avg_cols = [
|
||||
'PP:拉丝:1102K:出厂价:青州:国家能源宁煤(日)',
|
||||
'PP:拉丝:L5E89:出厂价:华北(第二区域):内蒙古久泰新材料(日)',
|
||||
@ -264,14 +253,15 @@ val_size = test_size # 验证集大小,同测试集大小
|
||||
|
||||
### 特征筛选用到的参数
|
||||
k = 100 # 特征筛选数量,如果是0或者值比特征数量大,代表全部特征
|
||||
corr_threshold = 0.6 # 相关性大于0.6的特征
|
||||
rote = 0.06 # 绘图上下界阈值
|
||||
|
||||
### 计算准确率
|
||||
weight_dict = [0.4,0.15,0.1,0.1,0.25] # 权重
|
||||
|
||||
# 绘图预测图上下边界使用的阈值,可以根据实际情况调整
|
||||
rote = 0.04
|
||||
|
||||
### 文件
|
||||
data_set = 'PP指标数据.xlsx' # 数据集文件
|
||||
# data_set = 'INE_OIL(1).csv'
|
||||
### 文件夹
|
||||
dataset = 'juxitingdataset' # 数据集文件夹
|
||||
|
||||
# 数据库名称
|
||||
@ -281,18 +271,21 @@ sqlitedb.connect()
|
||||
|
||||
settings = f'{input_size}-{horizon}-{train_steps}--{k}-{data_set}-{y}'
|
||||
# 获取日期时间
|
||||
now = datetime.datetime.now().strftime('%Y%m%d%H%M%S') # 获取当前日期时间
|
||||
reportname = f'PP--{now}-预测报告.pdf' # 报告文件名
|
||||
# now = datetime.datetime.now().strftime('%Y%m%d%H%M%S') # 获取当前日期时间
|
||||
now = datetime.datetime.now().strftime('%Y-%m-%d') # 获取当前日期时间
|
||||
reportname = f'PP大模型预测报告--{end_time}.pdf' # 报告文件名
|
||||
reportname = reportname.replace(':', '-') # 替换冒号
|
||||
|
||||
if end_time == '':
|
||||
end_time = now
|
||||
### 邮件配置
|
||||
username='1321340118@qq.com'
|
||||
passwd='wgczgyhtyyyyjghi'
|
||||
# recv=['liurui_test@163.com']
|
||||
# recv=['liurui_test@163.com','52585119@qq.com']
|
||||
recv=['liurui_test@163.com']
|
||||
title=reportname
|
||||
# recv=['liurui_test@163.com']
|
||||
title='reportname'
|
||||
content=y+'预测报告请看附件'
|
||||
file=os.path.join(dataset,reportname)
|
||||
file=os.path.join(dataset,'reportname')
|
||||
# file=os.path.join(dataset,'14-7-50--100-原油指标数据.xlsx-Brent连1合约价格--20240731175936-预测报告.pdf')
|
||||
ssl=True
|
||||
|
||||
@ -321,3 +314,4 @@ logger.addHandler(file_handler)
|
||||
logger.addHandler(console_handler)
|
||||
|
||||
# logger.info('当前配置:'+settings)
|
||||
|
||||
|
@ -845,6 +845,7 @@ def getdata(filename, datecol='date',y='y',dataset='',add_kdj=False,is_timefurtu
|
||||
df = datachuli(df_zhibiaoshuju,df_zhibiaoliebiao,datecol,y = y,dataset=dataset,add_kdj=add_kdj,is_timefurture=is_timefurture,end_time=end_time)
|
||||
|
||||
return df,df_zhibiaoliebiao
|
||||
|
||||
def getdata_juxiting(filename, datecol='date',y='y',dataset='',add_kdj=False,is_timefurture=False,end_time=''):
|
||||
logger.info('getdata接收:'+filename+' '+datecol+' '+end_time)
|
||||
# 判断后缀名 csv或excel
|
||||
@ -858,7 +859,7 @@ def getdata_juxiting(filename, datecol='date',y='y',dataset='',add_kdj=False,is_
|
||||
# 日期字符串转为datatime
|
||||
df = datachuli_juxiting(df_zhibiaoshuju,df_zhibiaoliebiao,datecol,y = y,dataset=dataset,add_kdj=add_kdj,is_timefurture=is_timefurture,end_time=end_time)
|
||||
|
||||
return df
|
||||
return df,df_zhibiaoliebiao
|
||||
|
||||
|
||||
def sanitize_filename(filename):
|
||||
|
174
main_juxiting.py
174
main_juxiting.py
@ -1,16 +1,54 @@
|
||||
# 读取配置
|
||||
from lib.dataread import *
|
||||
from lib.tools import *
|
||||
from models.nerulforcastmodels import ex_Model,model_losss,brent_export_pdf,tansuanli_export_pdf,pp_export_pdf,model_losss_juxiting
|
||||
from lib.tools import SendMail,exception_logger
|
||||
from models.nerulforcastmodels import ex_Model,model_losss,model_losss_juxiting,brent_export_pdf,tansuanli_export_pdf,pp_export_pdf,model_losss_juxiting
|
||||
|
||||
import glob
|
||||
import torch
|
||||
torch.set_float32_matmul_precision("high")
|
||||
|
||||
sqlitedb = SQLiteHandler(db_name)
|
||||
sqlitedb.connect()
|
||||
|
||||
|
||||
def predict_main():
|
||||
"""
|
||||
主预测函数,用于从 ETA 获取数据、处理数据、训练模型并进行预测。
|
||||
|
||||
参数:
|
||||
signature (BinanceAPI): Binance API 实例。
|
||||
etadata (EtaReader): ETA 数据读取器实例。
|
||||
is_eta (bool): 是否从 ETA 获取数据。
|
||||
data_set (str): 数据集名称。
|
||||
dataset (str): 数据集路径。
|
||||
add_kdj (bool): 是否添加 KDJ 指标。
|
||||
is_timefurture (bool): 是否添加时间衍生特征。
|
||||
end_time (str): 结束时间。
|
||||
is_edbnamelist (bool): 是否使用 EDB 名称列表。
|
||||
edbnamelist (list): EDB 名称列表。
|
||||
y (str): 预测目标列名。
|
||||
sqlitedb (SQLiteDB): SQLite 数据库实例。
|
||||
is_corr (bool): 是否进行相关性分析。
|
||||
horizon (int): 预测时域。
|
||||
input_size (int): 输入数据大小。
|
||||
train_steps (int): 训练步数。
|
||||
val_check_steps (int): 验证检查步数。
|
||||
early_stop_patience_steps (int): 早停耐心步数。
|
||||
is_debug (bool): 是否调试模式。
|
||||
dataset (str): 数据集名称。
|
||||
is_train (bool): 是否训练模型。
|
||||
is_fivemodels (bool): 是否使用五个模型。
|
||||
val_size (float): 验证集大小。
|
||||
test_size (float): 测试集大小。
|
||||
settings (dict): 模型设置。
|
||||
now (str): 当前时间。
|
||||
etadata (EtaReader): ETA 数据读取器实例。
|
||||
modelsindex (list): 模型索引列表。
|
||||
data (str): 数据类型。
|
||||
is_eta (bool): 是否从 ETA 获取数据。
|
||||
|
||||
返回:
|
||||
None
|
||||
"""
|
||||
global end_time
|
||||
signature = BinanceAPI(APPID, SECRET)
|
||||
etadata = EtaReader(signature=signature,
|
||||
classifylisturl=classifylisturl,
|
||||
@ -23,7 +61,6 @@ def predict_main():
|
||||
)
|
||||
# 获取数据
|
||||
if is_eta:
|
||||
# eta数据
|
||||
logger.info('从eta获取数据...')
|
||||
signature = BinanceAPI(APPID, SECRET)
|
||||
etadata = EtaReader(signature=signature,
|
||||
@ -37,12 +74,35 @@ def predict_main():
|
||||
)
|
||||
df_zhibiaoshuju, df_zhibiaoliebiao = etadata.get_eta_api_pp_data(data_set=data_set, dataset=dataset) # 原始数据,未处理
|
||||
|
||||
if is_market:
|
||||
logger.info('从市场信息平台获取数据...')
|
||||
try:
|
||||
# 如果是测试环境,最高价最低价取excel文档
|
||||
if server_host == '192.168.100.53':
|
||||
logger.info('从excel文档获取最高价最低价')
|
||||
df_zhibiaoshuju = get_high_low_data(df_zhibiaoshuju)
|
||||
else:
|
||||
logger.info('从市场信息平台获取数据')
|
||||
df_zhibiaoshuju = get_market_data(end_time,df_zhibiaoshuju)
|
||||
|
||||
except :
|
||||
logger.info('最高最低价拼接失败')
|
||||
|
||||
# 保存到xlsx文件的sheet表
|
||||
with pd.ExcelWriter(os.path.join(dataset,data_set)) as file:
|
||||
df_zhibiaoshuju.to_excel(file, sheet_name='指标数据', index=False)
|
||||
df_zhibiaoliebiao.to_excel(file, sheet_name='指标列表', index=False)
|
||||
|
||||
|
||||
# 数据处理
|
||||
df = datachuli_juxiting(df_zhibiaoshuju,df_zhibiaoliebiao,y = y,dataset=dataset,add_kdj=add_kdj,is_timefurture=is_timefurture,end_time=end_time)
|
||||
df = datachuli_juxiting(df_zhibiaoshuju, df_zhibiaoliebiao, y=y, dataset=dataset, add_kdj=add_kdj, is_timefurture=is_timefurture,
|
||||
end_time=end_time)
|
||||
|
||||
else:
|
||||
# 读取数据
|
||||
logger.info('读取本地数据:' + os.path.join(dataset, data_set))
|
||||
df = getdata_juxiting(filename=os.path.join(dataset,data_set),y=y,dataset=dataset,add_kdj=add_kdj,is_timefurture=is_timefurture,end_time=end_time) # 原始数据,未处理
|
||||
df,df_zhibiaoliebiao = getdata_juxiting(filename=os.path.join(dataset, data_set), y=y, dataset=dataset, add_kdj=add_kdj,
|
||||
is_timefurture=is_timefurture, end_time=end_time) # 原始数据,未处理
|
||||
|
||||
# 更改预测列名称
|
||||
df.rename(columns={y: 'y'}, inplace=True)
|
||||
@ -53,6 +113,11 @@ def predict_main():
|
||||
# 保存最新日期的y值到数据库
|
||||
# 取第一行数据存储到数据库中
|
||||
first_row = df[['ds', 'y']].tail(1)
|
||||
# 判断y的类型是否为float
|
||||
if not isinstance(first_row['y'].values[0], float):
|
||||
logger.info(f'{end_time}预测目标数据为空,跳过')
|
||||
return None
|
||||
|
||||
# 将最新真实值保存到数据库
|
||||
if not sqlitedb.check_table_exists('trueandpredict'):
|
||||
first_row.to_sql('trueandpredict', sqlitedb.connection, index=False)
|
||||
@ -67,24 +132,47 @@ def predict_main():
|
||||
continue
|
||||
sqlitedb.insert_data('trueandpredict', tuple(row_dict.values()), columns=row_dict.keys())
|
||||
|
||||
# 更新accuracy表的y值
|
||||
if not sqlitedb.check_table_exists('accuracy'):
|
||||
pass
|
||||
else:
|
||||
update_y = sqlitedb.select_data('accuracy',where_condition="y is null")
|
||||
if len(update_y) > 0:
|
||||
logger.info('更新accuracy表的y值')
|
||||
# 找到update_y 中ds且df中的y的行
|
||||
update_y = update_y[update_y['ds']<=end_time]
|
||||
logger.info(f'要更新y的信息:{update_y}')
|
||||
# try:
|
||||
for row in update_y.itertuples(index=False):
|
||||
try:
|
||||
row_dict = row._asdict()
|
||||
yy = df[df['ds']==row_dict['ds']]['y'].values[0]
|
||||
LOW = df[df['ds']==row_dict['ds']]['Brentzdj'].values[0]
|
||||
HIGH = df[df['ds']==row_dict['ds']]['Brentzgj'].values[0]
|
||||
sqlitedb.update_data('accuracy', f"y = {yy},LOW_PRICE = {LOW},HIGH_PRICE = {HIGH}", where_condition=f"ds = '{row_dict['ds']}'")
|
||||
except:
|
||||
logger.info(f'更新accuracy表的y值失败:{row_dict}')
|
||||
# except Exception as e:
|
||||
# logger.info(f'更新accuracy表的y值失败:{e}')
|
||||
|
||||
import datetime
|
||||
# 判断当前日期是不是周一
|
||||
is_weekday = datetime.datetime.now().weekday() == 0
|
||||
if is_weekday:
|
||||
logger.info('今天是周一,更新预测模型')
|
||||
# 计算最近20天预测残差最低的模型名称
|
||||
|
||||
model_results = sqlitedb.select_data('trueandpredict',order_by = "ds DESC",limit = "20")
|
||||
# 删除空值率为40%以上的列,删除空行
|
||||
model_results = model_results.dropna(thresh=len(model_results)*0.6,axis=1)
|
||||
# 计算最近60天预测残差最低的模型名称
|
||||
model_results = sqlitedb.select_data('trueandpredict', order_by="ds DESC", limit="60")
|
||||
# 删除空值率为90%以上的列
|
||||
if len(model_results) > 10:
|
||||
model_results = model_results.dropna(thresh=len(model_results)*0.1,axis=1)
|
||||
# 删除空行
|
||||
model_results = model_results.dropna()
|
||||
modelnames = model_results.columns.to_list()[2:]
|
||||
modelnames = model_results.columns.to_list()[2:-1]
|
||||
for col in model_results[modelnames].select_dtypes(include=['object']).columns:
|
||||
model_results[col] = model_results[col].astype(np.float32)
|
||||
# 计算每个预测值与真实值之间的偏差率
|
||||
for model in modelnames:
|
||||
model_results[f'{model}_abs_error_rate'] = abs(model_results['y'] - model_results[model]) / model_results['y']
|
||||
|
||||
# 获取每行对应的最小偏差率值
|
||||
min_abs_error_rate_values = model_results.apply(lambda row: row[[f'{model}_abs_error_rate' for model in modelnames]].min(), axis=1)
|
||||
# 获取每行对应的最小偏差率值对应的列名
|
||||
@ -93,14 +181,43 @@ def predict_main():
|
||||
min_abs_error_rate_column_name = min_abs_error_rate_column_name.map(lambda x: x.split('_')[0])
|
||||
# 取出现次数最多的模型名称
|
||||
most_common_model = min_abs_error_rate_column_name.value_counts().idxmax()
|
||||
logger.info(f"最近20天预测残差最低的模型名称:{most_common_model}")
|
||||
|
||||
logger.info(f"最近60天预测残差最低的模型名称:{most_common_model}")
|
||||
# 保存结果到数据库
|
||||
|
||||
if not sqlitedb.check_table_exists('most_model'):
|
||||
sqlitedb.create_table('most_model', columns="ds datetime, most_common_model TEXT")
|
||||
sqlitedb.insert_data('most_model', (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), most_common_model,), columns=('ds', 'most_common_model',))
|
||||
|
||||
try:
|
||||
if is_weekday:
|
||||
# if True:
|
||||
logger.info('今天是周一,发送特征预警')
|
||||
# 上传预警信息到数据库
|
||||
warning_data_df = df_zhibiaoliebiao.copy()
|
||||
warning_data_df = warning_data_df[warning_data_df['停更周期']> 3 ][['指标名称', '指标id', '频度','更新周期','指标来源','最后更新时间','停更周期']]
|
||||
# 重命名列名
|
||||
warning_data_df = warning_data_df.rename(columns={'指标名称': 'INDICATOR_NAME', '指标id': 'INDICATOR_ID', '频度': 'FREQUENCY', '更新周期': 'UPDATE_FREQUENCY', '指标来源': 'DATA_SOURCE', '最后更新时间': 'LAST_UPDATE_DATE', '停更周期': 'UPDATE_SUSPENSION_CYCLE'})
|
||||
from sqlalchemy import create_engine
|
||||
import urllib
|
||||
global password
|
||||
if '@' in password:
|
||||
password = urllib.parse.quote_plus(password)
|
||||
|
||||
engine = create_engine(f'mysql+pymysql://{dbusername}:{password}@{host}:{port}/{dbname}')
|
||||
warning_data_df['WARNING_DATE'] = datetime.date.today().strftime("%Y-%m-%d %H:%M:%S")
|
||||
warning_data_df['TENANT_CODE'] = 'T0004'
|
||||
# 插入数据之前查询表数据然后新增id列
|
||||
existing_data = pd.read_sql(f"SELECT * FROM {table_name}", engine)
|
||||
if not existing_data.empty:
|
||||
max_id = existing_data['ID'].astype(int).max()
|
||||
warning_data_df['ID'] = range(max_id + 1, max_id + 1 + len(warning_data_df))
|
||||
else:
|
||||
warning_data_df['ID'] = range(1, 1 + len(warning_data_df))
|
||||
warning_data_df.to_sql(table_name, con=engine, if_exists='append', index=False)
|
||||
if is_update_warning_data:
|
||||
upload_warning_info(len(warning_data_df))
|
||||
except:
|
||||
logger.info('上传预警信息到数据库失败')
|
||||
|
||||
if is_corr:
|
||||
df = corr_feature(df=df)
|
||||
|
||||
@ -132,25 +249,22 @@ def predict_main():
|
||||
|
||||
|
||||
logger.info('模型训练完成')
|
||||
# # 模型评估
|
||||
|
||||
logger.info('训练数据绘图ing')
|
||||
model_results3 = model_losss_juxiting(sqlitedb)
|
||||
|
||||
logger.info('训练数据绘图end')
|
||||
|
||||
# 模型报告
|
||||
|
||||
logger.info('制作报告ing')
|
||||
title = f'{settings}--{now}-预测报告' # 报告标题
|
||||
|
||||
title = f'{settings}--{end_time}-预测报告' # 报告标题
|
||||
reportname = f'PP大模型预测报告--{end_time}.pdf' # 报告文件名
|
||||
reportname = reportname.replace(':', '-') # 替换冒号
|
||||
pp_export_pdf(dataset=dataset,num_models = 5 if is_fivemodels else 22,time=end_time,
|
||||
reportname=reportname,sqlitedb=sqlitedb),
|
||||
|
||||
logger.info('制作报告end')
|
||||
logger.info('模型训练完成')
|
||||
|
||||
# tansuanli_export_pdf(dataset=dataset,num_models = 5 if is_fivemodels else 22,end_time=end_time,reportname=reportname)
|
||||
|
||||
# # LSTM 单变量模型
|
||||
# ex_Lstm(df,input_seq_len=input_size,output_seq_len=horizon,is_debug=is_debug,dataset=dataset)
|
||||
|
||||
@ -170,8 +284,18 @@ def predict_main():
|
||||
file=max(glob.glob(os.path.join(dataset,'*.pdf')), key=os.path.getctime),
|
||||
ssl=ssl,
|
||||
)
|
||||
m.send_mail()
|
||||
# m.send_mail()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# global end_time
|
||||
# is_on = True
|
||||
# # 遍历2024-11-25 到 2024-12-3 之间的工作日日期
|
||||
# for i_time in pd.date_range('2025-1-20', '2025-2-6', freq='B'):
|
||||
# end_time = i_time.strftime('%Y-%m-%d')
|
||||
# try:
|
||||
# predict_main()
|
||||
# except:
|
||||
# pass
|
||||
|
||||
predict_main()
|
@ -187,9 +187,9 @@ def ex_Model(df,horizon,input_size,train_steps,val_check_steps,early_stop_patien
|
||||
filename = max(glob.glob(os.path.join(dataset,'*.joblib')), key=os.path.getctime)
|
||||
logger.info('读取模型:'+ filename)
|
||||
nf = load(filename)
|
||||
# 测试集预测
|
||||
# # 测试集预测
|
||||
# nf_test_preds = nf.cross_validation(df=df_test, val_size=val_size, test_size=test_size, n_windows=None)
|
||||
# 测试集预测结果保存
|
||||
# # 测试集预测结果保存
|
||||
# nf_test_preds.to_csv(os.path.join(dataset,"cross_validation.csv"),index=False)
|
||||
|
||||
df_test['ds'] = pd.to_datetime(df_test['ds'], errors='coerce')
|
||||
@ -1059,6 +1059,431 @@ def model_losss(sqlitedb,end_time):
|
||||
return model_results3
|
||||
|
||||
|
||||
# 聚烯烃计算预测评估指数
|
||||
@exception_logger
|
||||
def model_losss_juxitingbak(sqlitedb,end_time):
|
||||
global dataset
|
||||
global rote
|
||||
most_model = [sqlitedb.select_data('most_model',columns=['most_common_model'],order_by='ds desc',limit=1).values[0][0]]
|
||||
most_model_name = most_model[0]
|
||||
|
||||
# 预测数据处理 predict
|
||||
df_combined = loadcsv(os.path.join(dataset,"cross_validation.csv"))
|
||||
df_combined.drop(columns=['cutoff'],inplace=True)
|
||||
df_combined['CREAT_DATE'] = end_time
|
||||
df_combined = dateConvert(df_combined)
|
||||
# df_combined = sqlitedb.select_data('accuracy',where_condition=f"created_dt <= '{end_time}'")
|
||||
df_combined4 = df_combined.copy() # 备份df_combined,后面画图需要
|
||||
# 删除缺失值大于80%的列
|
||||
logger.info(df_combined.shape)
|
||||
df_combined = df_combined.loc[:, df_combined.isnull().mean() < 0.8]
|
||||
logger.info(df_combined.shape)
|
||||
# 删除缺失值
|
||||
df_combined.dropna(inplace=True)
|
||||
logger.info(df_combined.shape)
|
||||
# 其他列转为数值类型
|
||||
df_combined = df_combined.astype({col: 'float32' for col in df_combined.columns if col not in ['CREAT_DATE','ds','created_dt'] })
|
||||
# 使用 groupby 和 transform 结合 lambda 函数来获取每个分组中 cutoff 的最小值,并创建一个新的列来存储这个最大值
|
||||
df_combined['max_cutoff'] = df_combined.groupby('ds')['CREAT_DATE'].transform('max')
|
||||
|
||||
# 然后筛选出那些 cutoff 等于 max_cutoff 的行,这样就得到了每个分组中 cutoff 最大的行,并保留了其他列
|
||||
df_combined = df_combined[df_combined['CREAT_DATE'] == df_combined['max_cutoff']]
|
||||
# 删除模型生成的cutoff列
|
||||
df_combined.drop(columns=['CREAT_DATE', 'max_cutoff','created_dt','min_within_quantile','max_within_quantile','id','min_price','max_price','LOW_PRICE','HIGH_PRICE','mean'], inplace=True)
|
||||
# 获取模型名称
|
||||
modelnames = df_combined.columns.to_list()[1:]
|
||||
if 'y' in modelnames:
|
||||
modelnames.remove('y')
|
||||
df_combined3 = df_combined.copy() # 备份df_combined,后面画图需要
|
||||
|
||||
|
||||
# 空的列表存储每个模型的MSE、RMSE、MAE、MAPE、SMAPE
|
||||
cellText = []
|
||||
|
||||
# 遍历模型名称,计算模型评估指标
|
||||
for model in modelnames:
|
||||
modelmse = mse(df_combined['y'], df_combined[model])
|
||||
modelrmse = rmse(df_combined['y'], df_combined[model])
|
||||
modelmae = mae(df_combined['y'], df_combined[model])
|
||||
# modelmape = mape(df_combined['y'], df_combined[model])
|
||||
# modelsmape = smape(df_combined['y'], df_combined[model])
|
||||
# modelr2 = r2_score(df_combined['y'], df_combined[model])
|
||||
cellText.append([model,round(modelmse, 3), round(modelrmse, 3), round(modelmae, 3)])
|
||||
|
||||
model_results3 = pd.DataFrame(cellText,columns=['模型(Model)','平均平方误差(MSE)', '均方根误差(RMSE)', '平均绝对误差(MAE)'])
|
||||
# 按MSE降序排列
|
||||
model_results3 = model_results3.sort_values(by='平均平方误差(MSE)', ascending=True)
|
||||
model_results3.to_csv(os.path.join(dataset,"model_evaluation.csv"),index=False)
|
||||
modelnames = model_results3['模型(Model)'].tolist()
|
||||
allmodelnames = modelnames.copy()
|
||||
# 保存5个最佳模型的名称
|
||||
if len(modelnames) > 5:
|
||||
modelnames = modelnames[0:5]
|
||||
if is_fivemodels:
|
||||
pass
|
||||
else:
|
||||
with open(os.path.join(dataset,"best_modelnames.txt"), 'w') as f:
|
||||
f.write(','.join(modelnames) + '\n')
|
||||
|
||||
# 预测值与真实值对比图
|
||||
plt.rcParams['font.sans-serif'] = ['SimHei']
|
||||
plt.figure(figsize=(15, 10))
|
||||
for n,model in enumerate(modelnames[:5]):
|
||||
plt.subplot(3, 2, n+1)
|
||||
plt.plot(df_combined3['ds'], df_combined3['y'], label='真实值')
|
||||
plt.plot(df_combined3['ds'], df_combined3[model], label=model)
|
||||
plt.legend()
|
||||
plt.xlabel('日期')
|
||||
plt.ylabel('价格')
|
||||
plt.title(model+'拟合')
|
||||
plt.subplots_adjust(hspace=0.5)
|
||||
plt.savefig(os.path.join(dataset,'预测值与真实值对比图.png'), bbox_inches='tight')
|
||||
plt.close()
|
||||
|
||||
|
||||
# # 历史数据+预测数据
|
||||
# # 拼接未来时间预测
|
||||
df_predict = pd.read_csv(os.path.join(dataset,'predict.csv'))
|
||||
df_predict.drop('unique_id',inplace=True,axis=1)
|
||||
df_predict.dropna(axis=1,inplace=True)
|
||||
|
||||
try:
|
||||
df_predict['ds'] = pd.to_datetime(df_predict['ds'],format=r'%Y-%m-%d')
|
||||
except ValueError :
|
||||
df_predict['ds'] = pd.to_datetime(df_predict['ds'],format=r'%Y/%m/%d')
|
||||
|
||||
def first_row_to_database(df):
|
||||
# # 取第一行数据存储到数据库中
|
||||
first_row = df.head(1)
|
||||
first_row['ds'] = first_row['ds'].dt.strftime('%Y-%m-%d 00:00:00')
|
||||
# 将预测结果保存到数据库
|
||||
if not sqlitedb.check_table_exists('trueandpredict'):
|
||||
first_row.to_sql('trueandpredict',sqlitedb.connection,index=False)
|
||||
else:
|
||||
for col in first_row.columns:
|
||||
sqlitedb.add_column_if_not_exists('trueandpredict',col,'TEXT')
|
||||
for row in first_row.itertuples(index=False):
|
||||
row_dict = row._asdict()
|
||||
columns=row_dict.keys()
|
||||
check_query = sqlitedb.select_data('trueandpredict',where_condition = f"ds = '{row.ds}'")
|
||||
if len(check_query) > 0:
|
||||
set_clause = ", ".join([f"{key} = '{value}'" for key, value in row_dict.items()])
|
||||
sqlitedb.update_data('trueandpredict',set_clause,where_condition = f"ds = '{row.ds}'")
|
||||
continue
|
||||
sqlitedb.insert_data('trueandpredict',tuple(row_dict.values()),columns=columns)
|
||||
|
||||
first_row_to_database(df_predict)
|
||||
|
||||
df_combined3 = pd.concat([df_combined3, df_predict]).reset_index(drop=True)
|
||||
|
||||
# 计算每个模型与最佳模型的绝对误差比例,根据设置的阈值rote筛选预测值显示最大最小值
|
||||
names = []
|
||||
names_df = df_combined3.copy()
|
||||
for col in allmodelnames:
|
||||
names_df[f'{col}-{most_model_name}-误差比例'] = abs(names_df[col] - names_df[most_model_name]) / names_df[most_model_name]
|
||||
names.append(f'{col}-{most_model_name}-误差比例')
|
||||
|
||||
names_df = names_df[names]
|
||||
def add_rote_column(row):
|
||||
columns = []
|
||||
for r in names_df.columns:
|
||||
if row[r] <= rote:
|
||||
columns.append(r.split('-')[0])
|
||||
return pd.Series([columns], index=['columns'])
|
||||
names_df['columns'] = names_df.apply(add_rote_column, axis=1)
|
||||
|
||||
def add_upper_lower_bound(row):
|
||||
print(row['columns'])
|
||||
print(type(row['columns']))
|
||||
# 计算上边界值
|
||||
upper_bound = df_combined3.loc[row.name,row['columns']].max()
|
||||
# 计算下边界值
|
||||
lower_bound = df_combined3.loc[row.name,row['columns']].min()
|
||||
return pd.Series([lower_bound, upper_bound], index=['min_within_quantile', 'max_within_quantile'])
|
||||
df_combined3[['min_within_quantile','max_within_quantile']] = names_df.apply(add_upper_lower_bound, axis=1)
|
||||
|
||||
def find_closest_values(row):
|
||||
x = row.y
|
||||
if x is None or np.isnan(x):
|
||||
return pd.Series([None, None], index=['min_price','max_price'])
|
||||
# row = row.drop('ds')
|
||||
row = row.values.tolist()
|
||||
row.sort()
|
||||
print(row)
|
||||
# x 在row中的索引
|
||||
index = row.index(x)
|
||||
if index == 0:
|
||||
return pd.Series([row[index+1], row[index+2]], index=['min_price','max_price'])
|
||||
elif index == len(row)-1:
|
||||
return pd.Series([row[index-2], row[index-1]], index=['min_price','max_price'])
|
||||
else:
|
||||
return pd.Series([row[index-1], row[index+1]], index=['min_price','max_price'])
|
||||
|
||||
def find_most_common_model():
|
||||
# 最多频率的模型名称
|
||||
min_model_max_frequency_model = df_combined3['min_model'].tail(60).value_counts().idxmax()
|
||||
max_model_max_frequency_model = df_combined3['max_model'].tail(60).value_counts().idxmax()
|
||||
if min_model_max_frequency_model == max_model_max_frequency_model:
|
||||
# 取60天第二多的模型
|
||||
max_model_max_frequency_model = df_combined3['max_model'].tail(60).value_counts().nlargest(2).index[1]
|
||||
|
||||
df_predict['min_model'] = min_model_max_frequency_model
|
||||
df_predict['max_model'] = max_model_max_frequency_model
|
||||
df_predict['min_within_quantile'] = df_predict[min_model_max_frequency_model]
|
||||
df_predict['max_within_quantile'] = df_predict[max_model_max_frequency_model]
|
||||
|
||||
|
||||
# find_most_common_model()
|
||||
|
||||
df_combined3['ds'] = pd.to_datetime(df_combined3['ds'])
|
||||
df_combined3['ds'] = df_combined3['ds'].dt.strftime('%Y-%m-%d')
|
||||
df_predict2 = df_combined3.tail(horizon)
|
||||
|
||||
# 保存到数据库
|
||||
if not sqlitedb.check_table_exists('accuracy'):
|
||||
columns = ','.join(df_combined3.columns.to_list()+['id','CREAT_DATE','min_price','max_price','LOW_PRICE','HIGH_PRICE','mean'])
|
||||
sqlitedb.create_table('accuracy',columns=columns)
|
||||
existing_data = sqlitedb.select_data(table_name = "accuracy")
|
||||
|
||||
if not existing_data.empty:
|
||||
max_id = existing_data['id'].astype(int).max()
|
||||
df_predict2['id'] = range(max_id + 1, max_id + 1 + len(df_predict2))
|
||||
else:
|
||||
df_predict2['id'] = range(1, 1 + len(df_predict2))
|
||||
df_predict2['CREAT_DATE'] = end_time
|
||||
|
||||
save_to_database(sqlitedb,df_predict2,"accuracy",end_time)
|
||||
|
||||
# 上周准确率计算
|
||||
accuracy_df = sqlitedb.select_data(table_name = "accuracy")
|
||||
predict_y = accuracy_df.copy()
|
||||
# ids = predict_y[predict_y['min_price'].isnull()]['id'].tolist()
|
||||
ids = predict_y['id'].tolist()
|
||||
# 准确率基准与绘图上下界逻辑一致
|
||||
# predict_y[['min_price','max_price']] = predict_y[['min_within_quantile','max_within_quantile']]
|
||||
# 模型评估前五均值
|
||||
# predict_y['min_price'] = predict_y[modelnames].mean(axis=1) -1
|
||||
# predict_y['max_price'] = predict_y[modelnames].mean(axis=1) +1
|
||||
# 模型评估前十均值
|
||||
predict_y['min_price'] = predict_y[allmodelnames[0:10]].mean(axis=1) -1.5
|
||||
predict_y['mean'] = predict_y[allmodelnames[0:10]].mean(axis=1)
|
||||
predict_y['max_price'] = predict_y[allmodelnames[0:10]].mean(axis=1) +1.5
|
||||
# 模型评估前十最大最小
|
||||
# allmodelnames 和 predict_y 列 重复的
|
||||
# allmodelnames = [col for col in allmodelnames if col in predict_y.columns]
|
||||
# predict_y['min_price'] = predict_y[allmodelnames[0:10]].min(axis=1)
|
||||
# predict_y['max_price'] = predict_y[allmodelnames[0:10]].max(axis=1)
|
||||
for id in ids:
|
||||
row = predict_y[predict_y['id'] == id]
|
||||
try:
|
||||
sqlitedb.update_data('accuracy',f"min_price = {row['min_price'].values[0]},max_price = {row['max_price'].values[0]},mean={row['mean'].values[0]}",f"id = {id}")
|
||||
except:
|
||||
logger.error(f'更新accuracy表中的min_price,max_price,mean值失败,row={row}')
|
||||
|
||||
df = accuracy_df.copy()
|
||||
df['ds'] = pd.to_datetime(df['ds'])
|
||||
df = df.reindex()
|
||||
|
||||
# 判断预测值在不在布伦特最高最低价范围内,准确率为1,否则为0
|
||||
def is_within_range(row):
|
||||
for model in allmodelnames:
|
||||
if row['LOW_PRICE'] <= row[col] <= row['HIGH_PRICE']:
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
# 定义一个函数来计算准确率
|
||||
def calculate_accuracy(row):
|
||||
# 比较真实最高最低,和预测最高最低 计算准确率
|
||||
# 全子集情况:
|
||||
if (row['max_price'] >= row['HIGH_PRICE'] and row['min_price'] <= row['LOW_PRICE']) or \
|
||||
(row['max_price'] <= row['HIGH_PRICE'] and row['min_price'] >= row['LOW_PRICE']):
|
||||
return 1
|
||||
# 无交集情况:
|
||||
if row['max_price'] < row['LOW_PRICE'] or \
|
||||
row['min_price'] > row['HIGH_PRICE']:
|
||||
return 0
|
||||
# 有交集情况:
|
||||
else:
|
||||
sorted_prices = sorted([row['LOW_PRICE'], row['min_price'], row['max_price'], row['HIGH_PRICE']])
|
||||
middle_diff = sorted_prices[2] - sorted_prices[1]
|
||||
price_range = row['HIGH_PRICE'] - row['LOW_PRICE']
|
||||
accuracy = middle_diff / price_range
|
||||
return accuracy
|
||||
|
||||
columns = ['HIGH_PRICE','LOW_PRICE','min_price','max_price']
|
||||
df[columns] = df[columns].astype(float)
|
||||
df['ACCURACY'] = df.apply(calculate_accuracy, axis=1)
|
||||
# df['ACCURACY'] = df.apply(is_within_range, axis=1)
|
||||
|
||||
# Compute the weighted accuracy rate over a window and persist it.
def _get_accuracy_rate(df, create_dates, ds_dates):
    """Aggregate per-row ACCURACY into one weighted rate over the given
    forecast-creation dates and target dates, then append the result to
    the ``accuracy_rote`` table via the enclosing scope's ``sqlitedb``.
    """
    # Restrict to forecasts created on create_dates that target ds_dates.
    in_scope = df['CREAT_DATE'].isin(create_dates) & df['ds'].isin(ds_dates)
    scoped = df[in_scope]

    accuracy_rote = 0
    # Each creation date contributes its mean accuracy, weighted by the
    # number of target days it covers (weight_dict is indexed by size - 1).
    for _, grp in scoped.groupby('CREAT_DATE'):
        accuracy_rote += grp['ACCURACY'].mean() * weight_dict[len(grp) - 1]
    accuracy_rote = round(accuracy_rote, 2)

    # One summary row: window start, window end, weighted rate.
    summary = pd.DataFrame(columns=['开始日期', '结束日期', '准确率'])
    summary.loc[len(summary)] = {'开始日期': ds_dates[0], '结束日期': ds_dates[-1], '准确率': accuracy_rote}
    summary.to_sql("accuracy_rote", con=sqlitedb.connection, if_exists='append', index=False)
|
||||
# Score the most recent window: get_week_date yields the forecast-creation
# dates and their target dates for end_time's week.
create_dates,ds_dates = get_week_date(end_time)
_get_accuracy_rate(df,create_dates,ds_dates)
|
||||
|
||||
def _add_abs_error_rate():
    """Annotate df_combined3 (from the enclosing scope) with per-model
    absolute error rates and, per row, the best model's name and prediction.
    Currently unused -- the call below is commented out."""
    # Deviation rate of each model's prediction from the true value y.
    for model in allmodelnames:
        df_combined3[f'{model}_abs_error_rate'] = abs(df_combined3['y'] - df_combined3[model]) / df_combined3['y']

    # Smallest deviation rate on each row.
    min_abs_error_rate_values = df_combined3.apply(lambda row: row[[f'{model}_abs_error_rate' for model in allmodelnames]].min(), axis=1)
    # Column name holding that smallest deviation rate.
    min_abs_error_rate_column_name = df_combined3.apply(lambda row: row[[f'{model}_abs_error_rate' for model in allmodelnames]].idxmin(), axis=1)
    # Strip the '_abs_error_rate' suffix back to the bare model name.
    # NOTE(review): split('_')[0] truncates model names that themselves
    # contain '_' -- verify none of allmodelnames do.
    min_abs_error_rate_column_name = min_abs_error_rate_column_name.map(lambda x: x.split('_')[0])
    # Prediction of the model with the smallest deviation rate, per row.
    min_abs_error_rate_predictions = df_combined3.apply(lambda row: row[min_abs_error_rate_column_name[row.name]], axis=1)
    # Attach the best prediction and its model name to the frame.
    df_combined3['min_abs_error_rate_prediction'] = min_abs_error_rate_predictions
    df_combined3['min_abs_error_rate_column_name'] = min_abs_error_rate_column_name
# _add_abs_error_rate()
|
||||
|
||||
# Coerce the numeric columns of df_combined3 to float rounded to 2 decimals.
# 'ds' is skipped; columns that cannot be cast raise ValueError and are
# deliberately left untouched.
for col in df_combined3.columns:
    try:
        if col != 'ds':
            df_combined3[col] = df_combined3[col].astype(float)
            df_combined3[col] = df_combined3[col].round(2)
    except ValueError:
        pass
df_combined3.to_csv(os.path.join(dataset,"testandpredict_groupby.csv"),index=False)

# History + predicted prices: rebuild the testandpredict_groupby table from scratch.
sqlitedb.drop_table('testandpredict_groupby')
df_combined3.to_sql('testandpredict_groupby',sqlitedb.connection,index=False)
|
||||
|
||||
def _plt_predict_ture(df):
    """Plot recent true prices, the quantile band, and the selected models'
    predictions; save the figure as '历史价格-预测值.png' under `dataset`."""
    # Keep everything when the frame is short; otherwise only the last 90 points.
    # NOTE(review): the original comment said 180 points but the cap is 90 --
    # confirm which window is intended.
    lens = df.shape[0] if df.shape[0] < 180 else 90
    df = df[-lens:]  # tail slice used for plotting
    # Price history figure.
    plt.figure(figsize=(20, 10))
    # Normalize the time axis.
    df['ds'] = pd.to_datetime(df['ds'])

    plt.plot(df['ds'], df['y'], label='真实值')
    # Shade the band between the per-day quantile max and min.
    plt.fill_between(df['ds'], df['max_within_quantile'], df['min_within_quantile'], alpha=0.2)
    # markers = ['o', 's', '^', 'D', 'v', '*', 'p', 'h', 'H', '+', 'x', 'd']
    # random_marker = random.choice(markers)
    # for model in allmodelnames:
    # for model in ['BiTCN','RNN']:
    #     plt.plot(df['ds'], df[model], label=model,marker=random_marker)
    # plt.plot(df_combined3['ds'], df_combined3['min_abs_error_rate_prediction'], label='最小绝对误差', linestyle='--', color='orange')
    # Grid lines.
    plt.grid(True)
    # Annotate each historical point with its value.
    for i, j in zip(df['ds'], df['y']):
        plt.text(i, j, str(j), ha='center', va='bottom')

    # Overlay the selected models' predictions.
    for model in most_model:
        plt.plot(df['ds'], df[model], label=model,marker='o')
    # Vertical dashed line where the forecast horizon begins (current date).
    plt.axvline(x=df['ds'].iloc[-horizon], color='r', linestyle='--')
    plt.legend()
    plt.xlabel('日期')
    # Format the x axis as year-month-day.
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    # Auto-place the date ticks.
    plt.gca().xaxis.set_major_locator(mdates.AutoDateLocator())
    plt.xticks(rotation=45)  # rotate labels 45 degrees to avoid overlap
    plt.ylabel('价格')

    plt.savefig(os.path.join(dataset,'历史价格-预测值.png'), bbox_inches='tight')
    plt.close()
||||
|
||||
def _plt_modeltopten_predict_ture(df):
    """Plot true prices vs. the top-ten-model mean and its min/max band;
    save the figure as '历史价格-预测值1.png' under `dataset`."""
    df['ds'] = pd.to_datetime(df['ds'])
    # For each target date keep only the most recent forecast (max CREAT_DATE).
    df['max_cutoff'] = df.groupby('ds')['CREAT_DATE'].transform('max')
    df = df[df['CREAT_DATE'] == df['max_cutoff']]
    df['mean'] = df['mean'].astype(float)
    # At most the last 180 points.
    lens = df.shape[0] if df.shape[0] < 180 else 180
    df = df[-lens:]  # tail slice used for plotting
    # Price history figure.
    plt.figure(figsize=(20, 10))
    plt.plot(df['ds'], df['y'], label='真实值')
    plt.plot(df['ds'], df['mean'], label='模型前十均值', linestyle='--', color='orange')
    # Shade the predicted min/max band.
    plt.fill_between(df['ds'], df['max_price'], df['min_price'], alpha=0.2)
    # markers = ['o', 's', '^', 'D', 'v', '*', 'p', 'h', 'H', '+', 'x', 'd']
    # random_marker = random.choice(markers)
    # for model in allmodelnames:
    # for model in ['BiTCN','RNN']:
    #     plt.plot(df['ds'], df[model], label=model,marker=random_marker)
    # plt.plot(df_combined3['ds'], df_combined3['min_abs_error_rate_prediction'], label='最小绝对误差', linestyle='--', color='orange')
    # Grid lines.
    plt.grid(True)
    # Annotate each historical point with its value.
    for i, j in zip(df['ds'], df['y']):
        plt.text(i, j, str(j), ha='center', va='bottom')

    # Vertical dashed line where the forecast horizon begins (current date).
    plt.axvline(x=df['ds'].iloc[-horizon], color='r', linestyle='--')
    plt.legend()
    plt.xlabel('日期')
    # Auto-place the date ticks.
    plt.gca().xaxis.set_major_locator(mdates.AutoDateLocator())
    plt.xticks(rotation=45)  # rotate labels 45 degrees to avoid overlap

    plt.ylabel('价格')

    plt.savefig(os.path.join(dataset,'历史价格-预测值1.png'), bbox_inches='tight')
    plt.close()
||||
|
||||
|
||||
def _plt_predict_table(df):
    """Render the last `horizon` forecast rows as a table image and save it
    as '预测值表格.png' under `dataset`."""
    # Forecast table figure.
    fig, ax = plt.subplots(figsize=(20, 6))
    ax.axis('off')  # hide the axes; only the table is drawn
    # Round numeric values to 2 decimals.
    df = df.round(2)
    # Keep only the forecast rows.
    df = df[-horizon:]
    df['Day'] = [f'Day_{i}' for i in range(1,horizon+1)]
    # Move the Day column to the front.
    df = df[['Day'] + list(df.columns[:-1])]
    table = ax.table(cellText=df.values, colLabels=df.columns, loc='center')
    # Widen the table: pin the font size instead of auto-sizing.
    table.auto_set_font_size(False)
    table.set_fontsize(10)

    # TODO: style the table -- highlight each column's minimum in green.
    plt.savefig(os.path.join(dataset,'预测值表格.png'), bbox_inches='tight')
    plt.close()
|
||||
|
||||
def _plt_model_results3():
    """Render the model-evaluation frame (model_results3 from the enclosing
    scope) as a table image and save it as '模型评估.png' under `dataset`."""
    # Visualize the evaluation results; use a CJK-capable font so the
    # Chinese labels render correctly.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    fig, ax = plt.subplots(figsize=(20, 10))
    ax.axis('off')  # hide the axes; only the table is drawn
    table = ax.table(cellText=model_results3.values, colLabels=model_results3.columns, loc='center')
    # Widen the table: pin the font size instead of auto-sizing.
    table.auto_set_font_size(False)
    table.set_fontsize(10)

    # TODO: style the table -- highlight each column's minimum in green.
    plt.savefig(os.path.join(dataset,'模型评估.png'), bbox_inches='tight')
    plt.close()
|
||||
|
||||
# Render all report figures and tables.
_plt_predict_ture(df_combined3)
_plt_modeltopten_predict_ture(df_combined4)
_plt_predict_table(df_combined3)
_plt_model_results3()

# Hand the evaluation frame back to the caller.
return model_results3
|
||||
|
||||
|
||||
# 聚烯烃计算预测评估指数
|
||||
@exception_logger
|
||||
def model_losss_juxiting(sqlitedb):
|
||||
@ -1087,6 +1512,8 @@ def model_losss_juxiting(sqlitedb):
|
||||
# Model columns = every column after the first, minus the true value 'y'
# and the date column 'ds'.
modelnames = df_combined.columns.to_list()[1:]
if 'y' in modelnames:
    modelnames.remove('y')
if 'ds' in modelnames:
    modelnames.remove('ds')
df_combined3 = df_combined.copy()  # backup of df_combined; plotting later needs it
|
||||
|
||||
|
||||
@ -1710,8 +2137,10 @@ def pp_export_pdf(num_indicators=475,num_models=21, num_dayindicator=202,inputsi
|
||||
# Percentage deviation of each selected model column from y.
df3 = pd.DataFrame()  # holds the deviation rates
# Drop rows containing any null.
df4 = df4.dropna()
# Drop rows where y is null.
# NOTE(review): redundant -- the unconditional dropna() above already removed
# rows with a null y.
df4 = df4.dropna(subset=['y'])
# # Drop rows containing any null.
# df4 = df4.dropna()
df3['ds'] = df4['ds']
for col in fivemodels_list:
    df3[col] = round(abs(df4[col] - df4['y']) / df4['y'] * 100,2)
|
||||
|
Loading…
Reference in New Issue
Block a user