Crude oil weekly forecast: optimize the configuration-parameter import logic

workpc 2025-03-05 16:10:20 +08:00
parent fe1e99b075
commit f1fe4ec943
3 changed files with 607 additions and 346 deletions

View File

@@ -1,6 +1,5 @@
 # Import modules
-from config_jingbo_zhoudu import *
 from reportlab.lib.units import cm  # the cm unit
 from reportlab.graphics.shapes import Drawing  # drawing tool
 from reportlab.graphics.charts.legends import Legend  # legend class
@@ -51,8 +50,97 @@ plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
 # from config_jingbo import logger
+global_config = {
+    # Core settings
+    'logger': None,  # logger instance
+    'dataset': None,  # dataset path
+    'y': None,  # target column name
+    'is_fivemodels': None,
+    # Model parameters
+    'data_set': None,  # dataset file name
+    'input_size': None,  # input dimension
+    'horizon': None,  # forecast horizon
+    'train_steps': None,  # training steps
+    'val_check_steps': None,  # validation interval
+    # Feature-engineering switches
+    'is_del_corr': None,  # whether to drop low-correlation features
+    'is_del_tow_month': None,  # whether to drop features not updated in the last two months
+    'is_eta': None,  # ETA integration switch
+    'is_update_eta': None,  # push updates to ETA
+    'is_update_eta_data': None,  # ETA data-update switch
+    'early_stop_patience_steps': None,  # early-stopping patience
+    'is_update_report': None,  # whether to update the report
+    # Time parameters
+    'start_year': None,  # start year
+    'end_time': None,  # end-time parameter (newly added; this key was missing before)
+    'freq': [None],  # data frequency (list structure kept)
+    # Data upload
+    'upload_url': None,  # main upload endpoint
+    'upload_headers': None,  # upload request headers
+    'upload_warning_url': None,  # warning-data upload endpoint
+    'upload_warning_data': None,  # warning payload structure
+    # Query endpoints
+    'query_data_list_item_nos_url': None,  # data-item query endpoint
+    'query_data_list_item_nos_data': None,  # data-item query parameters
+    # Field mappings
+    'offsite_col': None,  # offset columns
+    'avg_col': None,  # columns averaged into the target
+    'offsite': None,  # offset value
+    'edbcodenamedict': None,  # EDB code-to-name mapping
+    'rote': None,  # upper/lower bound threshold for plotting
+    # Endpoint settings (pre-existing)
+    'login_pushreport_url': None,
+    'login_data': None,
+    'upload_warning_headers': None,
+    # ETA credentials
+    'APPID': None,
+    'SECRET': None,
+    # Database
+    'sqlitedb': None,
+}
+# logger = global_config['logger']
+# dataset = global_config['dataset']
+# y = global_config['y']
+# data_set = global_config['data_set']
+# input_size = global_config['input_size']
+# horizon = global_config['horizon']
+# train_steps = global_config['train_steps']
+# val_check_steps = global_config['val_check_steps']
+# is_del_corr = global_config['is_del_corr']
+# is_del_tow_month = global_config['is_del_tow_month']
+# is_eta = global_config['is_eta']
+# is_update_eta = global_config['is_update_eta']
+# is_update_eta_data = global_config['is_update_eta_data']
+# start_year = global_config['start_year']
+# end_time = global_config['end_time']
+# freq = global_config['freq'][0]
+# offsite_col = global_config['offsite_col']
+# avg_cols = global_config['avg_col']
+# offsite = global_config['offsite']
+# edbcodenamedict = global_config['edbcodenamedict']
+# query_data_list_item_nos_url = global_config['query_data_list_item_nos_url']
+# query_data_list_item_nos_data = global_config['query_data_list_item_nos_data']
+# config.login_pushreport_url = global_config['config.login_pushreport_url']
+# login_data = global_config['login_data']
+# upload_url = global_config['upload_url']
+# upload_warning_url = global_config['upload_warning_url']
+# upload_warning_data = global_config['upload_warning_data']
+# warning_data = global_config['upload_warning_data']
+# APPID = global_config['APPID']
+# SECRET = global_config['SECRET']
 # Define functions
 def loadcsv(filename):
     """
     Read the CSV file with the given filename
@@ -145,15 +233,16 @@ def get_head_auth_report():
     Returns:
         str: the auth token if login succeeds, otherwise None
     """
-    logger.info("获取token中...")
-    logger.info(f'url:{login_pushreport_url},login_data:{login_data}')
+    config.logger.info("获取token中...")
+    config.logger.info(
+        f'url:{config.login_pushreport_url},login_data:{config.login_data}')
     # Send a POST request to the login URL with the login payload
-    login_res = requests.post(url=login_pushreport_url,
-                              json=login_data, timeout=(3, 30))
+    login_res = requests.post(url=config.login_pushreport_url,
+                              json=config.login_data, timeout=(3, 30))
     # Parse the response body as JSON
     text = json.loads(login_res.text)
-    logger.info(f'token接口响应{text}')
+    config.logger.info(f'token接口响应{text}')
     # If the response status indicates success
     if text["status"]:
         # Extract the auth token from the response data
@@ -180,30 +269,30 @@ def upload_report_data(token, upload_data):
     headers = {"Authorization": token}
     # Log that the report upload is starting
-    logger.info("报告上传中...")
+    config.logger.info("报告上传中...")
     # Log the auth header
-    logger.info(f"token:{token}")
+    config.logger.info(f"token:{token}")
     # Log the report payload being uploaded
-    logger.info(f"upload_data:{upload_data}")
+    config.logger.info(f"upload_data:{upload_data}")
     # Send a POST request to upload the report data
     upload_res = requests.post(
-        url=upload_url, headers=headers, json=upload_data, timeout=(3, 15))
+        url=config.upload_url, headers=headers, json=upload_data, timeout=(3, 15))
     # Parse the response body as JSON
     upload_res = json.loads(upload_res.text)
     # Log the response
-    logger.info(upload_res)
+    config.logger.info(upload_res)
     # On success, return the response object
     if upload_res:
         return upload_res
     # On failure, log it and return None
     else:
-        logger.info("报告上传失败")
+        config.logger.info("报告上传失败")
         return None
@@ -224,27 +313,27 @@ def upload_warning_data(warning_data):
     headers = {"Authorization": token}
     # Log that the warning upload is starting
-    logger.info("预警上传中...")
+    config.logger.info("预警上传中...")
     # Log the upload URL
-    logger.info(f"upload_warning_url:{upload_warning_url}")
+    config.logger.info(f"upload_warning_url:{config.upload_warning_url}")
     # Log the auth header
-    logger.info(f"token:{token}")
+    config.logger.info(f"token:{token}")
     # Log the warning payload being uploaded
-    logger.info(f"warning_data:{warning_data}")
+    config.logger.info(f"warning_data:{config.warning_data}")
     # Send a POST request to upload the warning data
     upload_res = requests.post(
-        url=upload_warning_url, headers=headers, json=warning_data, timeout=(3, 15))
+        url=config.upload_warning_url, headers=headers, json=config.warning_data, timeout=(3, 15))
     # On success, return the response object
     if upload_res:
         return upload_res
     # On failure, log it and return None
     else:
-        logger.info("预警上传失败")
+        config.logger.info("预警上传失败")
         return None
@@ -259,7 +348,7 @@ def upload_warning_info(df_count):
         None
     """
     # Log that the warning info upload is starting
-    logger.info(f'上传预警信息')
+    config.logger.info(f'上传预警信息')
     try:
         # Get the current date
@@ -270,17 +359,17 @@ def upload_warning_info(df_count):
         content = f'{warning_date}有{df_count}个停更'
         # Update the date and content in the warning payload
-        warning_data['data']['WARNING_DATE'] = warning_date2
-        warning_data['data']['WARNING_CONTENT'] = content
+        config.warning_data['data']['WARNING_DATE'] = warning_date2
+        config.warning_data['data']['WARNING_CONTENT'] = content
         # Upload the payload via upload_warning_data
-        upload_warning_data(warning_data)
+        upload_warning_data(config.warning_data)
         # Log that the warning info was uploaded successfully
-        logger.info(f'上传预警信息成功')
+        config.logger.info(f'上传预警信息成功')
     except Exception as e:
         # Log the failure together with the exception
-        logger.error(f'上传预警信息失败:{e}')
+        config.logger.error(f'上传预警信息失败:{e}')

 def create_feature_last_update_time(df):
@@ -317,7 +406,8 @@ def create_feature_last_update_time(df):
                 0]).total_seconds() / 3600 / 24
             last_update_time_datetime = datetime.datetime.strptime(
                 last_update_time, '%Y-%m-%d')
-            last_update_date = end_time if end_time != '' else datetime.datetime.now().strftime('%Y-%m-%d')
+            last_update_date = config.end_time if config.end_time != '' else datetime.datetime.now(
+            ).strftime('%Y-%m-%d')
             end_time_datetime = datetime.datetime.strptime(
                 last_update_date, '%Y-%m-%d')
             early_warning_date = last_update_time_datetime + \
@@ -327,18 +417,19 @@ def create_feature_last_update_time(df):
             early_warning_date = early_warning_date.strftime('%Y-%m-%d')
         except KeyError:
             time_diff = 0
-            early_warning_date = end_time
+            early_warning_date = config.end_time
             continue
         values = values + [time_diff, early_warning_date, stop_update_period]
         last_update_times_df.loc[len(last_update_times_df)] = values
-        logger.info(f"Column {column} was last updated at {last_update_time}")
+        config.logger.info(
+            f"Column {column} was last updated at {last_update_time}")
     y_last_update_time = last_update_times_df[last_update_times_df['feature']
                                               == 'y']['warning_date'].values[0]
-    last_update_times_df.to_csv(os.path.join(
-        dataset, 'last_update_times.csv'), index=False)
-    logger.info('特征停更信息保存到文件last_update_times.csv')
+    last_update_times_df.to_csv(os.path.join(
+        config.dataset, 'last_update_times.csv'), index=False)
+    config.logger.info('特征停更信息保存到文件last_update_times.csv')
     return last_update_times_df, y_last_update_time
@@ -378,7 +469,7 @@ def featurePindu(dataset):
         try:
             count = max(set(count), key=count.count)
         except ValueError:
-            logger.info(f'{column}列数据为空')
+            config.logger.info(f'{column}列数据为空')
             continue
         # Store it in the dict
         count_dict[column] = count
@@ -402,7 +493,7 @@ def featurePindu(dataset):
     # Replace NaN with ' '
     pindu_dfs = pindu_dfs.fillna('')
     pindu_dfs.to_csv(os.path.join(dataset, '特征频度统计.csv'), index=False)
-    logger.info(pindu_dfs)
+    config.logger.info(pindu_dfs)
     featureInfo = f'特征信息:总共有{len(columns)-2}个'
     for i in pindu_dfs.columns:
         featureInfo += f',{i}'
@@ -419,10 +510,10 @@ def featurePindu(dataset):
     -- 向前填充:举例:采集数据开始日期为2018年1月1日,那么周度数据可能是2018年1月3日,那么3日的数据向前填充,使1日2日都有数值
     数据特征相关性分析
     '''
-    logger.info(featureInfo)
+    config.logger.info(featureInfo)
     with open(os.path.join(dataset, '特征频度统计.txt'), 'w', encoding='utf-8') as f:
         f.write(featureInfo)
-    logger.info('*'*200)
+    config.logger.info('*'*200)

 def featureAnalysis(df, dataset, y):
@@ -477,10 +568,10 @@ def corr_feature(df):
     df_test_noscaler = df_test.copy()  # backup before lag processing
     df_noscaler = df_test.copy()
     # Plot the correlation heatmap
-    df_test.to_csv(os.path.join(dataset, '同步相关性.csv'))
+    df_test.to_csv(os.path.join(config.dataset, '同步相关性.csv'))
     corr = df_test.corr()
     # Save the correlation coefficients
-    corr.to_csv(os.path.join(dataset, '同步相关性系数.csv'))
+    corr.to_csv(os.path.join(config.dataset, '同步相关性系数.csv'))
     # plt.figure(figsize=(10, 10))
     # sns.heatmap(corr, annot=True, cmap='coolwarm')
     # plt.savefig('dataset/同步相关性热力图.png')
@@ -502,7 +593,7 @@ def corr_feature(df):
                                               == col]['滞后周期'].values[0]
         # Apply the lag
         df[col] = df[col].shift(period)
-    df.to_csv(os.path.join(dataset, '滞后处理后的数据集.csv'))
+    df.to_csv(os.path.join(config.dataset, '滞后处理后的数据集.csv'))
     # corr_feture_noscaler = {}  # stores the lag with the highest correlation
     # Iterate over each column of df_test and compute correlations
@@ -510,7 +601,7 @@ def corr_feature(df):
     # # Skip the y column
     # if col in ['y']:
     #     continue
-    # logger.info('特征:', col)
+    # config.logger.info('特征:', col)
     # # Lag the feature by n periods and compute its correlation with y
     # corr_dict = {}
     # try:
@@ -521,10 +612,10 @@ def corr_feature(df):
     #         df_noscaler[col+'_'+str(i)] = df_noscaler[col].shift(i)
     #         corr_dict[col+'_'+str(i)] = abs(df_noscaler[col+'_'+str(i)].corr(df_noscaler['y']))
     # except :
-    #     logger.info('特征:', col, '滑动错误,请查看')
+    #     config.logger.info('特征:', col, '滑动错误,请查看')
     #     continue
     # Print the feature with the highest correlation
-    # logger.info(max(corr_dict, key=corr_dict.get), corr_dict[max(corr_dict, key=corr_dict.get)])
+    # config.logger.info(max(corr_dict, key=corr_dict.get), corr_dict[max(corr_dict, key=corr_dict.get)])
     # corr_feture_noscaler[col] = max(corr_dict, key=corr_dict.get).split('_')[-1]
     # Plot the most correlated feature against y
     # plt.figure(figsize=(10, 5))
@@ -541,7 +632,7 @@ def corr_feature(df):
     # plt.savefig('dataset/特征与y的折线图_'+max(corr_dict, key=corr_dict.get).replace(':','_').replace('/','_').replace('(','_').replace(')','_')+'.png')
     # plt.close()
     # Save the results to a txt file
-    # logger.info('不参与标准化的特征滞后相关性写入txt文件')
+    # config.logger.info('不参与标准化的特征滞后相关性写入txt文件')
     # with open('dataset/不参与标准化的特征滞后相关性.txt', 'w') as f:
     #     for key, value in corr_feture_noscaler.items():
     #         f.write('%s:%s\n' % (key, value))
@@ -576,7 +667,7 @@ def corr_feature(df):
     # # Skip the y column
     # if col == 'y':
     #     continue
-    # logger.info('特征:', col)
+    # config.logger.info('特征:', col)
     # # Lag the feature by n periods and compute its correlation with y
     # corr_dict = {}
     # try:
@@ -587,10 +678,10 @@ def corr_feature(df):
    #         df_test[col+'_'+str(i)] = df_test[col].shift(i)
    #         corr_dict[col+'_'+str(i)] = abs(df_test[col+'_'+str(i)].corr(df_test['y']))
    # except :
-    #     logger.info('特征:', col, '滑动错误,请查看')
+    #     config.logger.info('特征:', col, '滑动错误,请查看')
    #     continue
    # # Print the feature with the highest correlation
-    # logger.info(max(corr_dict, key=corr_dict.get), corr_dict[max(corr_dict, key=corr_dict.get)])
+    # config.logger.info(max(corr_dict, key=corr_dict.get), corr_dict[max(corr_dict, key=corr_dict.get)])
    # corr_feture[col] = max(corr_dict, key=corr_dict.get).split('_')[-1]
    # # Save the results to a txt file
@@ -684,8 +775,8 @@ def check_column(df, col_name, two_months_ago):
         return True
     # Check whether the correlation coefficient exceeds the threshold (0.6)
-    if is_del_corr > 0:
-        if abs(df_check_column[col_name].corr(df_check_column['y'])) < is_del_corr:
+    if config.is_del_corr > 0:
+        if abs(df_check_column[col_name].corr(df_check_column['y'])) < config.is_del_corr:
             print(f'相关系数小于0.6:{col_name}')
             return True
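The hunk above routes the correlation threshold through the new proxy. In isolation, the filter logic is: when config.is_del_corr is positive, a feature column is flagged for removal if its absolute Pearson correlation with the target y falls below that threshold. A runnable sketch with toy data (the threshold value here is invented for illustration):

import pandas as pd

is_del_corr = 0.6  # stand-in for config.is_del_corr
df_check_column = pd.DataFrame({'y': [1.0, 2.0, 3.0, 4.0, 5.0],
                                'x': [1.1, 2.0, 2.9, 4.2, 5.1]})
# Same test as check_column: drop when |corr(x, y)| < threshold.
drop = abs(df_check_column['x'].corr(df_check_column['y'])) < is_del_corr
print(drop)  # False: x tracks y closely, so the column is kept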
@@ -710,50 +801,50 @@ def datachuli(df_zhibiaoshuju, df_zhibiaoliebiao, datecol='date', end_time='', y
     df.sort_values(by='ds', inplace=True)
     df['ds'] = pd.to_datetime(df['ds'])
     # Keep data from start_year through end_time
-    df = df[df['ds'].dt.year >= start_year]
+    df = df[df['ds'].dt.year >= config.start_year]
     df = df[df['ds'] <= end_time]
     # last_update_times_df,y_last_update_time = create_feature_last_update_time(df)
-    # logger.info(f'删除预警的特征前数据量:{df.shape}')
+    # config.logger.info(f'删除预警的特征前数据量:{df.shape}')
     # columns_to_drop = last_update_times_df[last_update_times_df['warning_date'] < y_last_update_time ]['feature'].values.tolist()
     # df = df.drop(columns = columns_to_drop)
-    # logger.info(f'删除预警的特征后数据量:{df.shape}')
+    # config.logger.info(f'删除预警的特征后数据量:{df.shape}')
     # if is_update_warning_data:
     #     upload_warning_info(last_update_times_df,y_last_update_time)
     # Drop columns whose latest data is older than six months, and columns whose last-two-months data is constant
-    if is_del_tow_month:
+    if config.is_del_tow_month:
         current_date = datetime.datetime.now()
         two_months_ago = current_date - timedelta(days=180)
-        logger.info(f'删除两月不更新特征前数据量:{df.shape}')
+        config.logger.info(f'删除两月不更新特征前数据量:{df.shape}')
         columns_to_drop = []
         for clo in df.columns:
             if check_column(df, clo, two_months_ago):
                 columns_to_drop.append(clo)
         df = df.drop(columns=columns_to_drop)
-        logger.info(f'删除两月不更新特征后数据量:{df.shape}')
+        config.logger.info(f'删除两月不更新特征后数据量:{df.shape}')
     # Derive time features
     if is_timefurture:
         df = addtimecharacteristics(df=df, dataset=dataset)
-    if freq == 'WW':
+    if config.freq == 'WW':
         # Custom weekly data
         # Group by week-of-month and take the mean
         df = df.groupby(df['yearmonthweeks']).mean()
         # Convert the time column to a date string
         df['ds'] = df['ds'].dt.strftime('%Y-%m-%d')
-    elif freq == 'W':
+    elif config.freq == 'W':
         # Weekly resampling
         df = df.resample('W', on='ds').mean().reset_index()
-    elif freq == 'M':
+    elif config.freq == 'M':
         # Monthly resampling
         df = df.resample('M', on='ds').mean().reset_index()
     # Drop rows where the target column is NaN
     ''' 工作日缺失,如果删除,会影响预测结果,导致统计准确率出错 '''
     # df = df.dropna(subset=['y'])
-    logger.info(f'删除预测列为空值的行后数据量:{df.shape}')
+    config.logger.info(f'删除预测列为空值的行后数据量:{df.shape}')
     df = df.dropna(axis=1, how='all')
-    logger.info(f'删除全为空值的列后数据量:{df.shape}')
+    config.logger.info(f'删除全为空值的列后数据量:{df.shape}')
     df.to_csv(os.path.join(dataset, '未填充的特征数据.csv'), index=False)
     # Drop the columns_to_drop rows from the indicator list
     df_zhibiaoliebiao = df_zhibiaoliebiao[df_zhibiaoliebiao['指标名称'].isin(
@@ -797,40 +888,40 @@ def zhoududatachuli(df_zhibiaoshuju, df_zhibiaoliebiao, datecol='date', end_time
     df.sort_values(by='ds', inplace=True)
     df['ds'] = pd.to_datetime(df['ds'])
     # Keep data from start_year through end_time
-    df = df[df['ds'].dt.year >= start_year]
+    df = df[df['ds'].dt.year >= config.start_year]
     df = df[df['ds'] <= end_time]
     # last_update_times_df,y_last_update_time = create_feature_last_update_time(df)
-    # logger.info(f'删除预警的特征前数据量:{df.shape}')
+    # config.logger.info(f'删除预警的特征前数据量:{df.shape}')
     # columns_to_drop = last_update_times_df[last_update_times_df['warning_date'] < y_last_update_time ]['feature'].values.tolist()
     # df = df.drop(columns = columns_to_drop)
-    # logger.info(f'删除预警的特征后数据量:{df.shape}')
+    # config.logger.info(f'删除预警的特征后数据量:{df.shape}')
     # if is_update_warning_data:
     #     upload_warning_info(last_update_times_df,y_last_update_time)
     # Drop columns whose latest data is older than six months, and columns whose last-two-months data is constant
-    if is_del_tow_month:
+    if config.is_del_tow_month:
         current_date = datetime.datetime.now()
         two_months_ago = current_date - timedelta(days=180)
-        logger.info(f'删除两月不更新特征前数据量:{df.shape}')
+        config.logger.info(f'删除两月不更新特征前数据量:{df.shape}')
         columns_to_drop = []
         for clo in df.columns:
             if check_column(df, clo, two_months_ago):
                 columns_to_drop.append(clo)
         df = df.drop(columns=columns_to_drop)
-        logger.info(f'删除两月不更新特征后数据量:{df.shape}')
+        config.logger.info(f'删除两月不更新特征后数据量:{df.shape}')
-    if freq == 'W':
+    if config.freq == 'W':
         # Weekly resampling
         df = df.resample('W', on='ds').mean().reset_index()
-    elif freq == 'M':
+    elif config.freq == 'M':
         # Monthly resampling
         df = df.resample('M', on='ds').mean().reset_index()
     # Drop rows where the target column is NaN
     ''' 工作日缺失,如果删除,会影响预测结果,导致统计准确率出错 '''
     # df = df.dropna(subset=['y'])
-    logger.info(f'删除预测列为空值的行后数据量:{df.shape}')
+    config.logger.info(f'删除预测列为空值的行后数据量:{df.shape}')
     df = df.dropna(axis=1, how='all')
-    logger.info(f'删除全为空值的列后数据量:{df.shape}')
+    config.logger.info(f'删除全为空值的列后数据量:{df.shape}')
     df.to_csv(os.path.join(dataset, '未填充的特征数据.csv'), index=False)
     # Drop the columns_to_drop rows from the indicator list
     df_zhibiaoliebiao = df_zhibiaoliebiao[df_zhibiaoliebiao['指标名称'].isin(
@@ -872,11 +963,11 @@ def datachuli_juxiting(df_zhibiaoshuju, df_zhibiaoliebiao, datecol='date', end_t
     df.rename(columns={datecol: 'ds'}, inplace=True)
     # Uniformly offset the specified columns
-    df[offsite_col] = df[offsite_col]-offsite
+    df[config.offsite_col] = df[config.offsite_col]-config.offsite
     # The target column is the mean of avg_cols
-    df[y] = df[avg_cols].mean(axis=1)
+    df[y] = df[config.avg_cols].mean(axis=1)
     # Drop the redundant avg_cols columns
-    df = df.drop(columns=avg_cols)
+    df = df.drop(columns=config.avg_cols)
     # Rename the target column
     df.rename(columns={y: 'y'}, inplace=True)
@@ -887,7 +978,7 @@ def datachuli_juxiting(df_zhibiaoshuju, df_zhibiaoliebiao, datecol='date', end_t
     df = df[df['ds'].dt.year >= 2018]
     # Keep data up to the current date
     df = df[df['ds'] <= end_time]
-    logger.info(f'删除两月不更新特征前数据量:{df.shape}')
+    config.logger.info(f'删除两月不更新特征前数据量:{df.shape}')
     # Drop columns whose latest data is more than two months old, and columns whose last-two-months data is constant
     current_date = datetime.datetime.now()
     two_months_ago = current_date - timedelta(days=40)
@@ -907,13 +998,13 @@ def datachuli_juxiting(df_zhibiaoshuju, df_zhibiaoliebiao, datecol='date', end_t
     columns_to_drop = df.columns[df.columns.map(check_column)].tolist()
     df = df.drop(columns=columns_to_drop)
-    logger.info(f'删除两月不更新特征后数据量:{df.shape}')
+    config.logger.info(f'删除两月不更新特征后数据量:{df.shape}')
     # Drop rows where the target column is NaN
     df = df.dropna(subset=['y'])
-    logger.info(f'删除预测列为空值的行后数据量:{df.shape}')
+    config.logger.info(f'删除预测列为空值的行后数据量:{df.shape}')
     df = df.dropna(axis=1, how='all')
-    logger.info(f'删除全为空值的列后数据量:{df.shape}')
+    config.logger.info(f'删除全为空值的列后数据量:{df.shape}')
     df.to_csv(os.path.join(dataset, '未填充的特征数据.csv'), index=False)
     # Drop the columns_to_drop rows from the indicator list
     df_zhibiaoliebiao = df_zhibiaoliebiao[df_zhibiaoliebiao['指标名称'].isin(
@@ -942,7 +1033,7 @@ def datachuli_juxiting(df_zhibiaoshuju, df_zhibiaoliebiao, datecol='date', end_t
 def getdata(filename, datecol='date', y='y', dataset='', add_kdj=False, is_timefurture=False, end_time=''):
-    logger.info('getdata接收'+filename+' '+datecol+' '+end_time)
+    config.logger.info('getdata接收'+filename+' '+datecol+' '+end_time)
     # Check the file extension: csv or excel
     if filename.endswith('.csv'):
         df = loadcsv(filename)
@@ -959,7 +1050,7 @@ def getdata(filename, datecol='date', y='y', dataset='', add_kdj=False, is_timef
 def getzhoududata(filename, datecol='date', y='y', dataset='', add_kdj=False, is_timefurture=False, end_time=''):
-    logger.info('getdata接收'+filename+' '+datecol+' '+end_time)
+    config.logger.info('getdata接收'+filename+' '+datecol+' '+end_time)
     # Check the file extension: csv or excel
     if filename.endswith('.csv'):
         df = loadcsv(filename)
@@ -976,7 +1067,7 @@ def getzhoududata(filename, datecol='date', y='y', dataset='', add_kdj=False, is
 def getdata_juxiting(filename, datecol='date', y='y', dataset='', add_kdj=False, is_timefurture=False, end_time=''):
-    logger.info('getdata接收'+filename+' '+datecol+' '+end_time)
+    config.logger.info('getdata接收'+filename+' '+datecol+' '+end_time)
     # Check the file extension: csv or excel
     if filename.endswith('.csv'):
         df = loadcsv(filename)
@@ -1002,6 +1093,103 @@ def sanitize_filename(filename):
     return sanitized
+
+
+class Config:
+    # Core settings
+    @property
+    def logger(self): return global_config['logger']
+
+    @property
+    def dataset(self): return global_config['dataset']
+
+    @property
+    def y(self): return global_config['y']
+
+    @property
+    def is_fivemodels(self): return global_config['is_fivemodels']
+
+    # Model parameters
+    @property
+    def data_set(self): return global_config['data_set']
+
+    @property
+    def input_size(self): return global_config['input_size']
+
+    @property
+    def horizon(self): return global_config['horizon']
+
+    @property
+    def train_steps(self): return global_config['train_steps']
+
+    @property
+    def val_check_steps(self): return global_config['val_check_steps']
+
+    @property
+    def rote(self): return global_config['rote']
+
+    # Feature-engineering switches
+    @property
+    def is_del_corr(self): return global_config['is_del_corr']
+
+    @property
+    def is_del_tow_month(self): return global_config['is_del_tow_month']
+
+    @property
+    def is_eta(self): return global_config['is_eta']
+
+    @property
+    def is_update_eta(self): return global_config['is_update_eta']
+
+    @property
+    def is_update_eta_data(self): return global_config['is_update_eta_data']
+
+    @property
+    def is_update_report(self): return global_config['is_update_report']
+
+    # Time parameters
+    @property
+    def start_year(self): return global_config['start_year']
+
+    @property
+    def end_time(self): return global_config['end_time']
+
+    @property
+    def freq(self): return global_config['freq']
+
+    # Endpoint settings
+    @property
+    def upload_url(self): return global_config['upload_url']
+
+    @property
+    def login_pushreport_url(
+        self): return global_config['login_pushreport_url']
+
+    @property
+    def login_data(self): return global_config['login_data']
+
+    @property
+    def upload_headers(self): return global_config['upload_headers']
+
+    @property
+    def upload_warning_url(self): return global_config['upload_warning_url']
+
+    @property
+    def upload_warning_data(self): return global_config['upload_warning_data']
+
+    # Query endpoints
+    @property
+    def query_data_list_item_nos_url(
+        self): return global_config['query_data_list_item_nos_url']
+
+    @property
+    def query_data_list_item_nos_data(
+        self): return global_config['query_data_list_item_nos_data']
+
+    # Field mappings
+    @property
+    def offsite_col(self): return global_config['offsite_col']
+
+    @property
+    def avg_col(self): return global_config['avg_col']
+
+    @property
+    def offsite(self): return global_config['offsite']
+
+    @property
+    def edbcodenamedict(self): return global_config['edbcodenamedict']
+
+    # ETA credentials
+    @property
+    def APPID(self): return global_config['APPID']
+
+    @property
+    def SECRET(self): return global_config['SECRET']
+
+    # Database
+    @property
+    def sqlitedb(self): return global_config['sqlitedb']
+
+
+config = Config()
+
+
 class BinanceAPI:
     '''
     Generate the signed request headers for the Binance API
@@ -1397,7 +1585,8 @@ class EtaReader():
         else:
             # Request failed: log the error
-            logger.info(f'Error: {response.status_code}, {response.text}')
+            config.logger.info(
+                f'Error: {response.status_code}, {response.text}')
             # Raise the exception explicitly
             raise Exception(f'Error: {response.status_code}, {response.text}')
@@ -1449,7 +1638,7 @@ class EtaReader():
             data = response.json()  # assume the endpoint returns JSON
             # Request succeeded: process the response
-            # logger.info(data.get('Data'))
+            # config.logger.info(data.get('Data'))
             # The fixed value you want to keep
             fixed_value = 1214
@@ -1469,7 +1658,7 @@ class EtaReader():
             url = self.classifyidlisturl+str(ClassifyId)
             response = requests.get(url, headers=self.headers)
             if response.status_code == 200:
-                # logger.info(response.text)
+                # config.logger.info(response.text)
                 data2 = response.json()
                 Data = data2.get('Data')
                 for i in Data:
@@ -1504,7 +1693,8 @@ class EtaReader():
                     edbname_df = edbname_df.dropna()
                     if len(edbname_df) == 0:
-                        logger.info(f'指标名称:{EdbName} 没有数据')
+                        config.logger.info(
+                            f'指标名称:{EdbName} 没有数据')
                         continue
                     try:
                         time_sequence = edbname_df['DataTime'].values.tolist(
@@ -1554,18 +1744,18 @@ class EtaReader():
                     # df = pd.merge(df, df2, how='outer')
                     df = pd.concat([df, df2])
                 else:
-                    logger.info(f'跳过指标 {EdbName}')
+                    config.logger.info(f'跳过指标 {EdbName}')
         # Collect the indicator ids from the list that are missing from the indicator column
         new_list = [
             item for item in self.edbcodelist if item not in df['指标id'].tolist()]
-        logger.info(new_list)
+        config.logger.info(new_list)
         # Iterate over new_list and fetch each indicator's data into df1
         for item in new_list:
-            logger.info(item)
+            config.logger.info(item)
             # Add item to df['指标id']
             try:
-                itemname = edbcodenamedict[item]
+                itemname = config.edbcodenamedict[item]
             except:
                 itemname = item
@@ -1579,8 +1769,8 @@ class EtaReader():
         # df1.dropna(inplace=True)
         # Drop rows dated later than today
         df1 = df1[df1['date'] <= datetime.datetime.now().strftime('%Y-%m-%d')]
-        logger.info(df1.head())
-        # logger.info(f'{df1.head()}')
+        config.logger.info(df1.head())
+        # config.logger.info(f'{df1.head()}')
         df_zhibiaoshuju = df1.copy()
         df_zhibiaoliebiao = df.copy()
@@ -1624,7 +1814,7 @@ class EtaReader():
             data = response.json()  # assume the endpoint returns JSON
             # Request succeeded: process the response
-            # logger.info(data.get('Data'))
+            # config.logger.info(data.get('Data'))
             # The fixed value you want to keep
             fixed_value = ClassifyId
@@ -1644,7 +1834,7 @@ class EtaReader():
             url = self.classifyidlisturl+str(ClassifyId)
             response = requests.get(url, headers=self.headers)
             if response.status_code == 200:
-                # logger.info(response.text)
+                # config.logger.info(response.text)
                 data2 = response.json()
                 Data = data2.get('Data')
                 for i in Data:
@@ -1669,18 +1859,18 @@ class EtaReader():
                     df = pd.concat([df, df2])
                     df1 = self.edbcodegetdata(df1, EdbCode, EdbName)
                 else:
-                    logger.info(f'跳过指标 {EdbName}')
+                    config.logger.info(f'跳过指标 {EdbName}')
         # Collect the indicator ids from the list that are missing from the indicator column
         new_list = [
             item for item in self.edbcodelist if item not in df['指标id'].tolist()]
-        logger.info(new_list)
+        config.logger.info(new_list)
         # Iterate over new_list and fetch each indicator's data into df1
         for item in new_list:
-            logger.info(item)
+            config.logger.info(item)
             # Add item to df['指标id']
             try:
-                itemname = edbcodenamedict[item]
+                itemname = config.edbcodenamedict[item]
             except:
                 itemname = item
@@ -1694,8 +1884,8 @@ class EtaReader():
         # df1.dropna(inplace=True)
         # Drop rows dated later than today
         df1 = df1[df1['date'] <= datetime.datetime.now().strftime('%Y-%m-%d')]
-        logger.info(df1.head())
-        # logger.info(f'{df1.head()}')
+        config.logger.info(df1.head())
+        # config.logger.info(f'{df1.head()}')
         # Save to sheets of the xlsx file
         with pd.ExcelWriter(os.path.join(dataset, data_set)) as file:
             df1.to_excel(file, sheet_name='指标数据', index=False)
@@ -1719,7 +1909,7 @@ class EtaReader():
         }
         # Send a POST request to upload the data
-        logger.info(f'请求参数:{data}')
+        config.logger.info(f'请求参数:{data}')
         response = requests.post(
             self.edbdatapushurl, headers=self.headers, data=json.dumps(data))
@@ -1727,11 +1917,12 @@ class EtaReader():
         if response.status_code == 200:
             data = response.json()  # assume the endpoint returns JSON
-            logger.info(f'上传成功,响应为:{data}')
+            config.logger.info(f'上传成功,响应为:{data}')
         else:
             # Request failed: log the error
-            logger.info(f'Error: {response.status_code}, {response.text}')
+            config.logger.info(
+                f'Error: {response.status_code}, {response.text}')
             # Raise the exception explicitly
             raise Exception(f'Error: {response.status_code}, {response.text}')
@@ -1758,11 +1949,12 @@ class EtaReader():
         if response.status_code == 200:
             data = response.json()  # assume the endpoint returns JSON
-            logger.info('删除成功,响应为:', data)
+            config.logger.info('删除成功,响应为:', data)
         else:
             # Request failed: log the error
-            logger.info(f'Error: {response.status_code}, {response.text}')
+            config.logger.info(
+                f'Error: {response.status_code}, {response.text}')
             # Raise the exception explicitly
             raise Exception(f'Error: {response.status_code}, {response.text}')
@@ -1797,11 +1989,12 @@ class EtaReader():
         if response.status_code == 200:
             data = response.json()  # assume the endpoint returns JSON
-            logger.info('删除成功,响应为:', data)
+            config.logger.info('删除成功,响应为:', data)
         else:
             # Request failed: log the error
-            logger.info(f'Error: {response.status_code}, {response.text}')
+            config.logger.info(
+                f'Error: {response.status_code}, {response.text}')
             # Raise the exception explicitly
             raise Exception(f'Error: {response.status_code}, {response.text}')
@@ -1813,15 +2006,15 @@ def get_market_data(end_time, df):
     # Get a token
     token = get_head_auth_report()
     # Build the request parameters
-    query_data_list_item_nos_data['data']['dateEnd'] = end_time.replace(
-        '-', '')
+    config.query_data_list_item_nos_data['data']['dateEnd'] = end_time.replace(
+        '-', '')
     # Send the request
     headers = {"Authorization": token}
-    logger.info('获取数据中...')
-    items_res = requests.post(url=query_data_list_item_nos_url, headers=headers,
-                              json=query_data_list_item_nos_data, timeout=(3, 35))
+    config.logger.info('获取数据中...')
+    items_res = requests.post(url=config.query_data_list_item_nos_url, headers=headers,
+                              json=config.query_data_list_item_nos_data, timeout=(3, 35))
     json_data = json.loads(items_res.text)
-    logger.info(f"获取到的数据:{json_data}")
+    config.logger.info(f"获取到的数据:{json_data}")
     df3 = pd.DataFrame(json_data['data'])
     # Group by dataItemNo into multiple dataframes, then merge them on dataDate into one
     df2 = pd.DataFrame()
@@ -1843,7 +2036,7 @@ def get_market_data(end_time, df):
 def get_high_low_data(df):
     # Read the excel file, starting from row 5
-    df1 = pd.read_excel(os.path.join(dataset, '数据项下载.xls'), header=5, names=[
-        'numid', 'date', 'Brentzdj', 'Brentzgj'])
+    df1 = pd.read_excel(os.path.join(config.dataset, '数据项下载.xls'), header=5, names=[
+        'numid', 'date', 'Brentzdj', 'Brentzgj'])
     # Merge the data
     df = pd.merge(df, df1, how='left', on='date')
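Taken together, the changes in this file replace direct imports of module-level settings with a late-binding configuration proxy: a mutable global_config dict holds the values, and the Config class exposes read-only properties that dereference the dict on every access, published as the module-level config singleton. A minimal runnable sketch of that pattern, with the key set reduced for illustration (not the full module):

import logging

# Mutable store; the entry script injects the real values later.
global_config = {'logger': None, 'dataset': None, 'freq': [None]}

class Config:
    # Each property re-reads global_config, so values bound after import
    # remain visible to any code holding this object.
    @property
    def logger(self): return global_config['logger']

    @property
    def dataset(self): return global_config['dataset']

    @property
    def freq(self): return global_config['freq']

config = Config()

# Startup-time injection, then lazy reads through the proxy:
global_config.update({'logger': logging.getLogger('demo'),
                      'dataset': 'dataset/', 'freq': ['W']})
assert config.dataset == 'dataset/' and config.freq[0] == 'W'

The dict indirection is what lets library functions write config.logger or config.end_time without importing the concrete settings module, which is exactly the dependency this file's first hunk removes.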

View File

@@ -1,14 +1,67 @@
 # Load configuration
 from lib.dataread import *
-# from config_jingbo_zhoudu import *
+from config_jingbo_zhoudu import *
 from lib.tools import SendMail, exception_logger
 from models.nerulforcastmodels import ex_Model, model_losss, model_losss_juxiting, brent_export_pdf, tansuanli_export_pdf, pp_export_pdf, model_losss_juxiting
+import datetime
+import glob
 import torch
 torch.set_float32_matmul_precision("high")
+
+global_config.update({
+    # Core parameters
+    'logger': logger,
+    'dataset': dataset,
+    'y': y,
+    'is_debug': is_debug,
+    'is_train': is_train,
+    'is_fivemodels': is_fivemodels,
+    'settings': settings,
+    # Model parameters
+    'data_set': data_set,
+    'input_size': input_size,
+    'horizon': horizon,
+    'train_steps': train_steps,
+    'val_check_steps': val_check_steps,
+    'val_size': val_size,
+    'test_size': test_size,
+    'modelsindex': modelsindex,
+    'rote': rote,
+    # Feature-engineering switches
+    'is_del_corr': is_del_corr,
+    'is_del_tow_month': is_del_tow_month,
+    'is_eta': is_eta,
+    'is_update_eta': is_update_eta,
+    'early_stop_patience_steps': early_stop_patience_steps,
+    # Time parameters
+    'start_year': start_year,
+    'end_time': end_time,
+    'freq': freq,  # keep the list structure
+    # Endpoint settings
+    'login_pushreport_url': login_pushreport_url,
+    'login_data': login_data,
+    'upload_url': upload_url,
+    'upload_warning_url': upload_warning_url,
+    'warning_data': warning_data,
+    # Query endpoints
+    'query_data_list_item_nos_url': query_data_list_item_nos_url,
+    'query_data_list_item_nos_data': query_data_list_item_nos_data,
+    # ETA settings
+    'APPID': APPID,
+    'SECRET': SECRET,
+    'etadata': data,
+    # Database
+    'sqlitedb': sqlitedb,
+})
+
+
 def predict_main():
     """
@@ -49,228 +102,237 @@ def predict_main():
     Returns:
         None
     """
-    global end_time
-    signature = BinanceAPI(APPID, SECRET)
-    etadata = EtaReader(signature=signature,
-                        classifylisturl=classifylisturl,
-                        classifyidlisturl=classifyidlisturl,
-                        edbcodedataurl=edbcodedataurl,
-                        edbcodelist=edbcodelist,
-                        edbdatapushurl=edbdatapushurl,
-                        edbdeleteurl=edbdeleteurl,
-                        edbbusinessurl=edbbusinessurl
-                        )
+    # global end_time
+    # signature = BinanceAPI(APPID, SECRET)
+    # etadata = EtaReader(signature=signature,
+    #                     classifylisturl=classifylisturl,
+    #                     classifyidlisturl=classifyidlisturl,
+    #                     edbcodedataurl=edbcodedataurl,
+    #                     edbcodelist=edbcodelist,
+    #                     edbdatapushurl=edbdatapushurl,
+    #                     edbdeleteurl=edbdeleteurl,
+    #                     edbbusinessurl=edbbusinessurl
+    #                     )
-    # Fetch the data
-    if is_eta:
-        logger.info('从eta获取数据...')
-        signature = BinanceAPI(APPID, SECRET)
-        etadata = EtaReader(signature=signature,
-                            classifylisturl=classifylisturl,
-                            classifyidlisturl=classifyidlisturl,
-                            edbcodedataurl=edbcodedataurl,
-                            edbcodelist=edbcodelist,
-                            edbdatapushurl=edbdatapushurl,
-                            edbdeleteurl=edbdeleteurl,
-                            edbbusinessurl=edbbusinessurl,
-                            )
-        df_zhibiaoshuju, df_zhibiaoliebiao = etadata.get_eta_api_yuanyou_data(
-            data_set=data_set, dataset=dataset)  # raw, unprocessed data
-        if is_market:
-            logger.info('从市场信息平台获取数据...')
-            try:
-                # In the test environment, take high/low prices from the excel file
-                if server_host == '192.168.100.53':
-                    logger.info('从excel文档获取最高价最低价')
-                    df_zhibiaoshuju = get_high_low_data(df_zhibiaoshuju)
-                else:
-                    logger.info('从市场信息平台获取数据')
-                    df_zhibiaoshuju = get_market_data(
-                        end_time, df_zhibiaoshuju)
-            except:
-                logger.info('最高最低价拼接失败')
-        # Save to sheets of the xlsx file
-        with pd.ExcelWriter(os.path.join(dataset, data_set)) as file:
-            df_zhibiaoshuju.to_excel(file, sheet_name='指标数据', index=False)
-            df_zhibiaoliebiao.to_excel(file, sheet_name='指标列表', index=False)
-        # Data processing
-        df = datachuli(df_zhibiaoshuju, df_zhibiaoliebiao, y=y, dataset=dataset, add_kdj=add_kdj, is_timefurture=is_timefurture,
-                       end_time=end_time)
+    # # Fetch the data
+    # if is_eta:
+    #     logger.info('从eta获取数据...')
+    #     signature = BinanceAPI(APPID, SECRET)
+    #     etadata = EtaReader(signature=signature,
+    #                         classifylisturl=classifylisturl,
+    #                         classifyidlisturl=classifyidlisturl,
+    #                         edbcodedataurl=edbcodedataurl,
+    #                         edbcodelist=edbcodelist,
+    #                         edbdatapushurl=edbdatapushurl,
+    #                         edbdeleteurl=edbdeleteurl,
+    #                         edbbusinessurl=edbbusinessurl,
+    #                         )
+    #     df_zhibiaoshuju, df_zhibiaoliebiao = etadata.get_eta_api_yuanyou_data(
+    #         data_set=data_set, dataset=dataset)  # raw, unprocessed data
+    #     if is_market:
+    #         logger.info('从市场信息平台获取数据...')
+    #         try:
+    #             # In the test environment, take high/low prices from the excel file
+    #             if server_host == '192.168.100.53':
+    #                 logger.info('从excel文档获取最高价最低价')
+    #                 df_zhibiaoshuju = get_high_low_data(df_zhibiaoshuju)
+    #             else:
+    #                 logger.info('从市场信息平台获取数据')
+    #                 df_zhibiaoshuju = get_market_data(
+    #                     end_time, df_zhibiaoshuju)
+    #         except:
+    #             logger.info('最高最低价拼接失败')
+    #     # Save to sheets of the xlsx file
+    #     with pd.ExcelWriter(os.path.join(dataset, data_set)) as file:
+    #         df_zhibiaoshuju.to_excel(file, sheet_name='指标数据', index=False)
+    #         df_zhibiaoliebiao.to_excel(file, sheet_name='指标列表', index=False)
+    #     # Data processing
+    #     df = datachuli(df_zhibiaoshuju, df_zhibiaoliebiao, y=y, dataset=dataset, add_kdj=add_kdj, is_timefurture=is_timefurture,
+    #                    end_time=end_time)
-    else:
-        # Read local data
-        logger.info('读取本地数据:' + os.path.join(dataset, data_set))
-        df, df_zhibiaoliebiao = getdata(filename=os.path.join(dataset, data_set), y=y, dataset=dataset, add_kdj=add_kdj,
-                                        is_timefurture=is_timefurture, end_time=end_time)  # raw, unprocessed data
-    # Rename the target column
-    df.rename(columns={y: 'y'}, inplace=True)
-    if is_edbnamelist:
-        df = df[edbnamelist]
-    df.to_csv(os.path.join(dataset, '指标数据.csv'), index=False)
-    # Save the latest y value to the database
-    # Store the first row in the database
-    first_row = df[['ds', 'y']].tail(1)
-    # Check whether y is a float
-    if not isinstance(first_row['y'].values[0], float):
-        logger.info(f'{end_time}预测目标数据为空,跳过')
-        return None
-    # Save the latest actual value to the database
-    if not sqlitedb.check_table_exists('trueandpredict'):
-        first_row.to_sql('trueandpredict', sqlitedb.connection, index=False)
-    else:
-        for row in first_row.itertuples(index=False):
-            row_dict = row._asdict()
+    # else:
+    #     # Read local data
+    #     logger.info('读取本地数据:' + os.path.join(dataset, data_set))
+    #     df, df_zhibiaoliebiao = getdata(filename=os.path.join(dataset, data_set), y=y, dataset=dataset, add_kdj=add_kdj,
+    #                                     is_timefurture=is_timefurture, end_time=end_time)  # raw, unprocessed data
+    # # Rename the target column
+    # df.rename(columns={y: 'y'}, inplace=True)
+    # if is_edbnamelist:
+    #     df = df[edbnamelist]
+    # df.to_csv(os.path.join(dataset, '指标数据.csv'), index=False)
+    # # Save the latest y value to the database
+    # # Store the first row in the database
+    # first_row = df[['ds', 'y']].tail(1)
+    # # Check whether y is a float
+    # if not isinstance(first_row['y'].values[0], float):
+    #     logger.info(f'{end_time}预测目标数据为空,跳过')
+    #     return None
+    # # Save the latest actual value to the database
+    # if not sqlitedb.check_table_exists('trueandpredict'):
+    #     first_row.to_sql('trueandpredict', sqlitedb.connection, index=False)
+    # else:
+    #     for row in first_row.itertuples(index=False):
+    #         row_dict = row._asdict()
-            # row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d')
-            # row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d %H:%M:%S')
-            check_query = sqlitedb.select_data(
-                'trueandpredict', where_condition=f"ds = '{row.ds}'")
-            if len(check_query) > 0:
-                set_clause = ", ".join(
-                    [f"{key} = '{value}'" for key, value in row_dict.items()])
-                sqlitedb.update_data(
-                    'trueandpredict', set_clause, where_condition=f"ds = '{row.ds}'")
-                continue
-            sqlitedb.insert_data('trueandpredict', tuple(
-                row_dict.values()), columns=row_dict.keys())
+    #         config.logger.info(f'要保存的真实值:{row_dict}')
+    #         # Check whether ds is a string; if not, convert it to one
+    #         if isinstance(row_dict['ds'], (pd.Timestamp, datetime.datetime)):
+    #             row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d')
+    #         elif not isinstance(row_dict['ds'], str):
+    #             try:
+    #                 row_dict['ds'] = pd.to_datetime(
+    #                     row_dict['ds']).strftime('%Y-%m-%d')
+    #             except:
+    #                 logger.warning(f"无法解析的时间格式: {row_dict['ds']}")
+    #         # row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d')
+    #         # row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d %H:%M:%S')
+    #         check_query = sqlitedb.select_data(
+    #             'trueandpredict', where_condition=f"ds = '{row.ds}'")
+    #         if len(check_query) > 0:
+    #             set_clause = ", ".join(
+    #                 [f"{key} = '{value}'" for key, value in row_dict.items()])
+    #             sqlitedb.update_data(
+    #                 'trueandpredict', set_clause, where_condition=f"ds = '{row.ds}'")
+    #             continue
+    #         sqlitedb.insert_data('trueandpredict', tuple(
+    #             row_dict.values()), columns=row_dict.keys())
-    # Update y values in the accuracy table
-    if not sqlitedb.check_table_exists('accuracy'):
-        pass
-    else:
-        update_y = sqlitedb.select_data(
-            'accuracy', where_condition="y is null")
-        if len(update_y) > 0:
-            logger.info('更新accuracy表的y值')
-            # Find the rows whose ds is in update_y and whose y is in df
-            update_y = update_y[update_y['ds'] <= end_time]
-            logger.info(f'要更新y的信息{update_y}')
-            # try:
-            for row in update_y.itertuples(index=False):
-                try:
-                    row_dict = row._asdict()
-                    yy = df[df['ds'] == row_dict['ds']]['y'].values[0]
-                    LOW = df[df['ds'] == row_dict['ds']]['Brentzdj'].values[0]
-                    HIGH = df[df['ds'] == row_dict['ds']]['Brentzgj'].values[0]
-                    sqlitedb.update_data(
-                        'accuracy', f"y = {yy},LOW_PRICE = {LOW},HIGH_PRICE = {HIGH}", where_condition=f"ds = '{row_dict['ds']}'")
-                except:
-                    logger.info(f'更新accuracy表的y值失败{row_dict}')
-            # except Exception as e:
-            #     logger.info(f'更新accuracy表的y值失败{e}')
+    # # Update y values in the accuracy table
+    # if not sqlitedb.check_table_exists('accuracy'):
+    #     pass
+    # else:
+    #     update_y = sqlitedb.select_data(
+    #         'accuracy', where_condition="y is null")
+    #     if len(update_y) > 0:
+    #         logger.info('更新accuracy表的y值')
+    #         # Find the rows whose ds is in update_y and whose y is in df
+    #         update_y = update_y[update_y['ds'] <= end_time]
+    #         logger.info(f'要更新y的信息{update_y}')
+    #         # try:
+    #         for row in update_y.itertuples(index=False):
+    #             try:
+    #                 row_dict = row._asdict()
+    #                 yy = df[df['ds'] == row_dict['ds']]['y'].values[0]
+    #                 LOW = df[df['ds'] == row_dict['ds']]['Brentzdj'].values[0]
+    #                 HIGH = df[df['ds'] == row_dict['ds']]['Brentzgj'].values[0]
+    #                 sqlitedb.update_data(
+    #                     'accuracy', f"y = {yy},LOW_PRICE = {LOW},HIGH_PRICE = {HIGH}", where_condition=f"ds = '{row_dict['ds']}'")
+    #             except:
+    #                 logger.info(f'更新accuracy表的y值失败{row_dict}')
+    #         # except Exception as e:
+    #         #     logger.info(f'更新accuracy表的y值失败{e}')
-    import datetime
-    # Check whether today is Monday
-    is_weekday = datetime.datetime.now().weekday() == 0
-    if is_weekday:
-        logger.info('今天是周一,更新预测模型')
-        # Find the model with the lowest prediction residual over the last 60 days
-        model_results = sqlitedb.select_data(
-            'trueandpredict', order_by="ds DESC", limit="60")
-        # Drop columns with more than 90% missing values
-        if len(model_results) > 10:
-            model_results = model_results.dropna(
-                thresh=len(model_results)*0.1, axis=1)
-        # Drop empty rows
-        model_results = model_results.dropna()
-        modelnames = model_results.columns.to_list()[2:-1]
-        for col in model_results[modelnames].select_dtypes(include=['object']).columns:
-            model_results[col] = model_results[col].astype(np.float32)
-        # Compute each prediction's deviation rate from the actual value
-        for model in modelnames:
-            model_results[f'{model}_abs_error_rate'] = abs(
-                model_results['y'] - model_results[model]) / model_results['y']
-        # Get each row's minimum deviation rate
-        min_abs_error_rate_values = model_results.apply(
-            lambda row: row[[f'{model}_abs_error_rate' for model in modelnames]].min(), axis=1)
-        # Get the column name of each row's minimum deviation rate
-        min_abs_error_rate_column_name = model_results.apply(
-            lambda row: row[[f'{model}_abs_error_rate' for model in modelnames]].idxmin(), axis=1)
-        # Convert the column index back to a model name
-        min_abs_error_rate_column_name = min_abs_error_rate_column_name.map(
-            lambda x: x.split('_')[0])
-        # Take the most frequent model name
-        most_common_model = min_abs_error_rate_column_name.value_counts().idxmax()
-        logger.info(f"最近60天预测残差最低的模型名称{most_common_model}")
-        # Save the result to the database
-        if not sqlitedb.check_table_exists('most_model'):
-            sqlitedb.create_table(
-                'most_model', columns="ds datetime, most_common_model TEXT")
-        sqlitedb.insert_data('most_model', (datetime.datetime.now().strftime(
-            '%Y-%m-%d %H:%M:%S'), most_common_model,), columns=('ds', 'most_common_model',))
+    # # Check whether today is Monday
+    # is_weekday = datetime.datetime.now().weekday() == 0
+    # if is_weekday:
+    #     logger.info('今天是周一,更新预测模型')
+    #     # Find the model with the lowest prediction residual over the last 60 days
+    #     model_results = sqlitedb.select_data(
+    #         'trueandpredict', order_by="ds DESC", limit="60")
+    #     # Drop columns with more than 90% missing values
+    #     if len(model_results) > 10:
+    #         model_results = model_results.dropna(
+    #             thresh=len(model_results)*0.1, axis=1)
+    #     # Drop empty rows
+    #     model_results = model_results.dropna()
+    #     modelnames = model_results.columns.to_list()[2:-1]
+    #     for col in model_results[modelnames].select_dtypes(include=['object']).columns:
+    #         model_results[col] = model_results[col].astype(np.float32)
+    #     # Compute each prediction's deviation rate from the actual value
+    #     for model in modelnames:
+    #         model_results[f'{model}_abs_error_rate'] = abs(
+    #             model_results['y'] - model_results[model]) / model_results['y']
+    #     # Get each row's minimum deviation rate
+    #     min_abs_error_rate_values = model_results.apply(
+    #         lambda row: row[[f'{model}_abs_error_rate' for model in modelnames]].min(), axis=1)
+    #     # Get the column name of each row's minimum deviation rate
+    #     min_abs_error_rate_column_name = model_results.apply(
+    #         lambda row: row[[f'{model}_abs_error_rate' for model in modelnames]].idxmin(), axis=1)
+    #     # Convert the column index back to a model name
+    #     min_abs_error_rate_column_name = min_abs_error_rate_column_name.map(
+    #         lambda x: x.split('_')[0])
+    #     # Take the most frequent model name
+    #     most_common_model = min_abs_error_rate_column_name.value_counts().idxmax()
+    #     logger.info(f"最近60天预测残差最低的模型名称{most_common_model}")
+    #     # Save the result to the database
+    #     if not sqlitedb.check_table_exists('most_model'):
+    #         sqlitedb.create_table(
+    #             'most_model', columns="ds datetime, most_common_model TEXT")
+    #     sqlitedb.insert_data('most_model', (datetime.datetime.now().strftime(
+    #         '%Y-%m-%d %H:%M:%S'), most_common_model,), columns=('ds', 'most_common_model',))
-    try:
-        if is_weekday:
-            # if True:
-            logger.info('今天是周一,发送特征预警')
-            # Upload warning info to the database
-            warning_data_df = df_zhibiaoliebiao.copy()
-            warning_data_df = warning_data_df[warning_data_df['停更周期'] > 3][[
-                '指标名称', '指标id', '频度', '更新周期', '指标来源', '最后更新时间', '停更周期']]
-            # Rename the columns
-            warning_data_df = warning_data_df.rename(columns={'指标名称': 'INDICATOR_NAME', '指标id': 'INDICATOR_ID', '频度': 'FREQUENCY',
-                                                              '更新周期': 'UPDATE_FREQUENCY', '指标来源': 'DATA_SOURCE', '最后更新时间': 'LAST_UPDATE_DATE', '停更周期': 'UPDATE_SUSPENSION_CYCLE'})
-            from sqlalchemy import create_engine
-            import urllib
-            global password
-            if '@' in password:
-                password = urllib.parse.quote_plus(password)
-            engine = create_engine(
-                f'mysql+pymysql://{dbusername}:{password}@{host}:{port}/{dbname}')
-            warning_data_df['WARNING_DATE'] = datetime.date.today().strftime(
-                "%Y-%m-%d %H:%M:%S")
-            warning_data_df['TENANT_CODE'] = 'T0004'
-            # Query existing rows before inserting, then add an ID column
-            existing_data = pd.read_sql(f"SELECT * FROM {table_name}", engine)
-            if not existing_data.empty:
-                max_id = existing_data['ID'].astype(int).max()
-                warning_data_df['ID'] = range(
-                    max_id + 1, max_id + 1 + len(warning_data_df))
-            else:
-                warning_data_df['ID'] = range(1, 1 + len(warning_data_df))
-            warning_data_df.to_sql(
-                table_name, con=engine, if_exists='append', index=False)
-            if is_update_warning_data:
-                upload_warning_info(len(warning_data_df))
-    except:
-        logger.info('上传预警信息到数据库失败')
-    if is_corr:
-        df = corr_feature(df=df)
-    df1 = df.copy()  # backup; used to re-attach the ds and y columns after feature selection
-    logger.info(f"开始训练模型...")
-    row, col = df.shape
-    now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+    # try:
+    #     if is_weekday:
+    #         # if True:
+    #         logger.info('今天是周一,发送特征预警')
+    #         # Upload warning info to the database
+    #         warning_data_df = df_zhibiaoliebiao.copy()
+    #         warning_data_df = warning_data_df[warning_data_df['停更周期'] > 3][[
+    #             '指标名称', '指标id', '频度', '更新周期', '指标来源', '最后更新时间', '停更周期']]
+    #         # Rename the columns
+    #         warning_data_df = warning_data_df.rename(columns={'指标名称': 'INDICATOR_NAME', '指标id': 'INDICATOR_ID', '频度': 'FREQUENCY',
+    #                                                           '更新周期': 'UPDATE_FREQUENCY', '指标来源': 'DATA_SOURCE', '最后更新时间': 'LAST_UPDATE_DATE', '停更周期': 'UPDATE_SUSPENSION_CYCLE'})
+    #         from sqlalchemy import create_engine
+    #         import urllib
+    #         global password
+    #         if '@' in password:
+    #             password = urllib.parse.quote_plus(password)
+    #         engine = create_engine(
+    #             f'mysql+pymysql://{dbusername}:{password}@{host}:{port}/{dbname}')
+    #         warning_data_df['WARNING_DATE'] = datetime.date.today().strftime(
+    #             "%Y-%m-%d %H:%M:%S")
+    #         warning_data_df['TENANT_CODE'] = 'T0004'
+    #         # Query existing rows before inserting, then add an ID column
+    #         existing_data = pd.read_sql(f"SELECT * FROM {table_name}", engine)
+    #         if not existing_data.empty:
+    #             max_id = existing_data['ID'].astype(int).max()
+    #             warning_data_df['ID'] = range(
+    #                 max_id + 1, max_id + 1 + len(warning_data_df))
+    #         else:
+    #             warning_data_df['ID'] = range(1, 1 + len(warning_data_df))
+    #         warning_data_df.to_sql(
+    #             table_name, con=engine, if_exists='append', index=False)
+    #         if is_update_warning_data:
+    #             upload_warning_info(len(warning_data_df))
+    # except:
+    #     logger.info('上传预警信息到数据库失败')
+    # if is_corr:
+    #     df = corr_feature(df=df)
+    # df1 = df.copy()  # backup; used to re-attach the ds and y columns after feature selection
+    # logger.info(f"开始训练模型...")
+    # row, col = df.shape
+    # now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
-    ex_Model(df,
-             horizon=horizon,
-             input_size=input_size,
-             train_steps=train_steps,
-             val_check_steps=val_check_steps,
-             early_stop_patience_steps=early_stop_patience_steps,
-             is_debug=is_debug,
-             dataset=dataset,
-             is_train=is_train,
-             is_fivemodels=is_fivemodels,
-             val_size=val_size,
-             test_size=test_size,
-             settings=settings,
-             now=now,
-             etadata=etadata,
-             modelsindex=modelsindex,
-             data=data,
-             is_eta=is_eta,
-             end_time=end_time,
-             )
-    logger.info('模型训练完成')
+    # ex_Model(df,
+    #          horizon=global_config['horizon'],
+    #          input_size=global_config['input_size'],
+    #          train_steps=global_config['train_steps'],
+    #          val_check_steps=global_config['val_check_steps'],
+    #          early_stop_patience_steps=global_config['early_stop_patience_steps'],
+    #          is_debug=global_config['is_debug'],
+    #          dataset=global_config['dataset'],
+    #          is_train=global_config['is_train'],
+    #          is_fivemodels=global_config['is_fivemodels'],
+    #          val_size=global_config['val_size'],
+    #          test_size=global_config['test_size'],
+    #          settings=global_config['settings'],
+    #          now=now,
+    #          etadata=global_config['etadata'],
+    #          modelsindex=global_config['modelsindex'],
+    #          data=data,
+    #          is_eta=global_config['is_eta'],
+    #          end_time=global_config['end_time'],
+    #          )
+    # logger.info('模型训练完成')
     logger.info('训练数据绘图ing')
     model_results3 = model_losss(sqlitedb, end_time=end_time)
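One piece of genuinely new logic in the commented-out block above is the ds type normalization added before rows are written to SQLite: a pd.Timestamp or datetime is formatted directly, and anything else that is not already a string goes through pd.to_datetime first. A runnable sketch of that branch in isolation (the helper name is invented for illustration):

import datetime
import pandas as pd

def normalize_ds(value):
    # Timestamps/datetimes are formatted directly; other non-strings are
    # parsed with pandas first, mirroring the commented-out branches above.
    if isinstance(value, (pd.Timestamp, datetime.datetime)):
        return value.strftime('%Y-%m-%d')
    if not isinstance(value, str):
        return pd.to_datetime(value).strftime('%Y-%m-%d')
    return value

print(normalize_ds(pd.Timestamp('2025-03-05 16:10:20')))  # 2025-03-05
print(normalize_ds('2025-03-05'))                         # returned unchanged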

View File

@@ -112,11 +112,11 @@ def ex_Model(df, horizon, input_size, train_steps, val_check_steps, early_stop_p
     df_test['unique_id'] = 1
     # Show the first rows of the split datasets
-    logger.info("Training set head:")
-    logger.info(df_train.head())
-    logger.info("\nTesting set head:")
-    logger.info(df_test.head())
+    config.logger.info("Training set head:")
+    config.logger.info(df_train.head())
+    config.logger.info("\nTesting set head:")
+    config.logger.info(df_test.head())
     models = [
         NHITS(h=horizon, input_size=input_size, max_steps=train_steps, val_check_steps=val_check_steps,
@@ -185,7 +185,8 @@ def ex_Model(df, horizon, input_size, train_steps, val_check_steps, early_stop_p
         models.append(model)
     # Create the NeuralForecast instance and train the models
-    nf = NeuralForecast(models=models, freq=freq[0])
+    # freq = 'B'
+    nf = NeuralForecast(models=models, freq=config.freq[:1])
     from joblib import dump, load
     if is_train:
@@ -207,7 +208,7 @@ def ex_Model(df, horizon, input_size, train_steps, val_check_steps, early_stop_p
         import glob
         filename = max(glob.glob(os.path.join(
             dataset, '*.joblib')), key=os.path.getctime)
-        logger.info('读取模型:' + filename)
+        config.logger.info('读取模型:' + filename)
         nf = load(filename)
     # Predict on the test set
     nf_test_preds = nf.cross_validation(
@@ -233,10 +234,10 @@ def ex_Model(df, horizon, input_size, train_steps, val_check_steps, early_stop_p
     df_predict.to_csv(os.path.join(dataset, "predict.csv"), index=False)
     # Save the forecasts to the database
-    save_to_database(sqlitedb, df_predict, 'predict', end_time)
+    save_to_database(config.sqlitedb, df_predict, 'predict', end_time)
     # Push the forecasts to ETA
-    if is_update_eta:
+    if config.is_update_eta:
         df_predict['ds'] = pd.to_datetime(df_predict['ds'])
         dates = df_predict['ds'].dt.strftime('%Y-%m-%d')
@@ -942,12 +943,12 @@ def model_losss(sqlitedb, end_time):
     df_combined['CREAT_DATE'] = df_combined['cutoff']
     df_combined4 = df_combined.copy()  # 备份df_combined,后面画图需要
     # 删除缺失值大于80%的列
-    logger.info(df_combined.shape)
+    config.logger.info(df_combined.shape)
     df_combined = df_combined.loc[:, df_combined.isnull().mean() < 0.8]
-    logger.info(df_combined.shape)
+    config.logger.info(df_combined.shape)
     # 删除缺失值
     df_combined.dropna(inplace=True)
-    logger.info(df_combined.shape)
+    config.logger.info(df_combined.shape)
     # 其他列转为数值类型
     df_combined = df_combined.astype({col: 'float32' for col in df_combined.columns if col not in [
         'CREAT_DATE', 'ds', 'created_dt', 'cutoff']})
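
The filter in this hunk keeps a column only while less than 80% of its values are missing; df.isnull().mean() is the per-column NaN fraction. A quick illustration:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0],
                       'b': [1.0, np.nan, np.nan, np.nan],  # 75% missing -> kept
                       'c': [np.nan] * 4})                  # 100% missing -> dropped
    print(df.isnull().mean().to_dict())                     # {'a': 0.0, 'b': 0.75, 'c': 1.0}
    print(df.loc[:, df.isnull().mean() < 0.8].columns.tolist())  # ['a', 'b']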
@@ -989,17 +990,17 @@ def model_losss(sqlitedb, end_time):
     model_results3 = model_results3.sort_values(
         by='平均平方误差(MSE)', ascending=True)
     model_results3.to_csv(os.path.join(
-        dataset, "model_evaluation.csv"), index=False)
+        config.dataset, "model_evaluation.csv"), index=False)
     modelnames = model_results3['模型(Model)'].tolist()
     most_model_name = modelnames[0]
     allmodelnames = modelnames.copy()
     # 保存5个最佳模型的名称
     if len(modelnames) > 5:
         modelnames = modelnames[0:5]
-    if is_fivemodels:
+    if config.is_fivemodels:
         pass
     else:
-        with open(os.path.join(dataset, "best_modelnames.txt"), 'w') as f:
+        with open(os.path.join(config.dataset, "best_modelnames.txt"), 'w') as f:
             f.write(','.join(modelnames) + '\n')
     # 预测值与真实值对比图
@@ -1014,12 +1015,13 @@ def model_losss(sqlitedb, end_time):
         plt.ylabel('价格')
         plt.title(model+'拟合')
         plt.subplots_adjust(hspace=0.5)
-    plt.savefig(os.path.join(dataset, '预测值与真实值对比图.png'), bbox_inches='tight')
+    plt.savefig(os.path.join(config.dataset, '预测值与真实值对比图.png'),
+                bbox_inches='tight')
     plt.close()
     # # 历史数据+预测数据
     # # 拼接未来时间预测
-    df_predict = pd.read_csv(os.path.join(dataset, 'predict.csv'))
+    df_predict = pd.read_csv(os.path.join(config.dataset, 'predict.csv'))
     df_predict.drop('unique_id', inplace=True, axis=1)
     df_predict.dropna(axis=1, inplace=True)
@@ -1071,7 +1073,7 @@ def model_losss(sqlitedb, end_time):
     def add_rote_column(row):
         columns = []
         for r in names_df.columns:
-            if row[r] <= rote:
+            if row[r] <= config.rote:
                 columns.append(r.split('-')[0])
         return pd.Series([columns], index=['columns'])
     names_df['columns'] = names_df.apply(add_rote_column, axis=1)
@@ -1124,7 +1126,7 @@ def model_losss(sqlitedb, end_time):
     df_combined3['ds'] = pd.to_datetime(df_combined3['ds'])
     df_combined3['ds'] = df_combined3['ds'].dt.strftime('%Y-%m-%d')
-    df_predict2 = df_combined3.tail(horizon)
+    df_predict2 = df_combined3.tail(config.horizon)
     # 保存到数据库
     if not sqlitedb.check_table_exists('accuracy'):
@@ -1167,7 +1169,8 @@ def model_losss(sqlitedb, end_time):
             sqlitedb.update_data(
                 'accuracy', f"min_price = {row['min_price'].values[0]},max_price = {row['max_price'].values[0]},mean={row['mean'].values[0]}", f"id = {id}")
         except:
-            logger.error(f'更新accuracy表中的min_price,max_price,mean值失败row={row}')
+            config.logger.error(
+                f'更新accuracy表中的min_price,max_price,mean值失败row={row}')
     df = accuracy_df.copy()
     df['ds'] = pd.to_datetime(df['ds'])
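
The accuracy-table update above goes through a small SQLite wrapper whose implementation is not part of this diff. The sketch below is an assumption reconstructed only from the calls visible here (check_table_exists, update_data, drop_table-style helpers), included to make the calling convention concrete; it is not the repository's actual class.

    import sqlite3

    class SQLiteDB:
        """Hypothetical minimal wrapper matching the call sites in this file."""

        def __init__(self, path):
            self.conn = sqlite3.connect(path)

        def check_table_exists(self, table):
            cur = self.conn.execute(
                "SELECT 1 FROM sqlite_master WHERE type='table' AND name=?", (table,))
            return cur.fetchone() is not None

        def update_data(self, table, set_clause, where_clause):
            # mirrors the f-string call sites above; a parameterized API taking
            # column/value pairs would be safer against quoting issues
            self.conn.execute(f"UPDATE {table} SET {set_clause} WHERE {where_clause}")
            self.conn.commit()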
@@ -1256,7 +1259,7 @@ def model_losss(sqlitedb, end_time):
         except ValueError:
             pass
     df_combined3.to_csv(os.path.join(
-        dataset, "testandpredict_groupby.csv"), index=False)
+        config.dataset, "testandpredict_groupby.csv"), index=False)
     # 历史价格+预测价格
     sqlitedb.drop_table('testandpredict_groupby')
@@ -1301,7 +1304,8 @@ def model_losss(sqlitedb, end_time):
         plt.xticks(rotation=45)  # 日期标签旋转45度防止重叠
         plt.ylabel('价格')
-        plt.savefig(os.path.join(dataset, '历史价格-预测值.png'), bbox_inches='tight')
+        plt.savefig(os.path.join(config.dataset, '历史价格-预测值.png'),
+                    bbox_inches='tight')
         plt.close()

     def _plt_modeltopten_predict_ture(df):
@@ -1340,7 +1344,7 @@ def model_losss(sqlitedb, end_time):
         plt.ylabel('价格')
-        plt.savefig(os.path.join(dataset, '历史价格-预测值1.png'),
+        plt.savefig(os.path.join(config.dataset, '历史价格-预测值1.png'),
                     bbox_inches='tight')
         plt.close()
@@ -1361,7 +1365,8 @@ def model_losss(sqlitedb, end_time):
         table.set_fontsize(10)
         # 设置表格样式,列数据最小的用绿色标识
-        plt.savefig(os.path.join(dataset, '预测值表格.png'), bbox_inches='tight')
+        plt.savefig(os.path.join(config.dataset, '预测值表格.png'),
+                    bbox_inches='tight')
         plt.close()

     def _plt_model_results3():
@@ -1376,7 +1381,8 @@ def model_losss(sqlitedb, end_time):
         table.set_fontsize(10)
         # 设置表格样式,列数据最小的用绿色标识
-        plt.savefig(os.path.join(dataset, '模型评估.png'), bbox_inches='tight')
+        plt.savefig(os.path.join(config.dataset, '模型评估.png'),
+                    bbox_inches='tight')
         plt.close()
     # _plt_predict_ture(df_combined3)
@@ -2198,7 +2204,7 @@ def brent_export_pdf(num_indicators=475, num_models=21, num_dayindicator=202, in
     #         print(f'绘制第{i+1}个特征{col}与价格散点图时出错:{e}')
     # 添加标题
-    content.append(Graphs.draw_title(f'{y}{time}预测报告'))
+    content.append(Graphs.draw_title(f'{config.y}{time}预测报告'))
     # 预测结果
     content.append(Graphs.draw_little_title('一、预测结果:'))
@@ -2321,7 +2327,7 @@ def brent_export_pdf(num_indicators=475, num_models=21, num_dayindicator=202, in
         '''相关系数接近0表示两个变量之间不存在线性关系即它们的变化不会随着对方的变化而变化。'''))
     for name, group in grouped:
         cols = group['指标名称'].tolist()
-        logger.info(f'开始绘制{name}类指标的相关性直方图')
+        config.logger.info(f'开始绘制{name}类指标的相关性直方图')
         cols_subset = cols
         feature_names = ['y'] + cols_subset
         correlation_matrix = df_zhibiaoshuju[feature_names].corr()['y']
@@ -2376,7 +2382,7 @@ def brent_export_pdf(num_indicators=475, num_models=21, num_dayindicator=202, in
             draw_feature_trend(feature_data_df, negative_corr_features)
         # 计算correlation_sum 第一行的相关性的绝对值的总和
         correlation_sum = correlation_matrix.abs().sum()
-        logger.info(f'{name}类指标的相关性总和为:{correlation_sum}')
+        config.logger.info(f'{name}类指标的相关性总和为:{correlation_sum}')
         # 分组的相关性总和拼接到grouped_corr
         goup_corr = pd.DataFrame(
             {'指标分类': [name], '指标数量': [len(cols_subset)], '相关性总和': [correlation_sum]})
@@ -2384,7 +2390,7 @@ def brent_export_pdf(num_indicators=475, num_models=21, num_dayindicator=202, in
         [grouped_corr, goup_corr], axis=0, ignore_index=True)
     # 绘制相关性总和的气泡图
-    logger.info(f'开始绘制相关性总和的气泡图')
+    config.logger.info(f'开始绘制相关性总和的气泡图')
     plt.figure(figsize=(10, 10))
     sns.scatterplot(data=grouped_corr, x='相关性总和', y='指标数量', size='相关性总和', sizes=(
         grouped_corr['相关性总和'].min()*5, grouped_corr['相关性总和'].max()*5), hue='指标分类', palette='viridis')
@@ -2396,7 +2402,7 @@ def brent_export_pdf(num_indicators=475, num_models=21, num_dayindicator=202, in
     content.append(Graphs.draw_img(os.path.join(dataset, '指标分类相关性总和的气泡图.png')))
     content.append(Graphs.draw_text(
         '气泡图中,横轴为指标分类,纵轴为指标分类下的特征数量,气泡的面积越大表示该分类中特征的相关系数和越大。'))
-    logger.info(f'绘制相关性总和的气泡图结束')
+    config.logger.info(f'绘制相关性总和的气泡图结束')
     content.append(Graphs.draw_little_title('模型选择:'))
     content.append(Graphs.draw_text(
         f'预测使用了{num_models}个模型进行训练拟合通过评估指标MAE从小到大排列前5个模型的简介如下'))
@@ -2441,7 +2447,7 @@ def brent_export_pdf(num_indicators=475, num_models=21, num_dayindicator=202, in
     doc.build(content)
     # pdf 上传到数字化信息平台
     try:
-        if is_update_report:
+        if config.is_update_report:
             with open(os.path.join(dataset, reportname), 'rb') as f:
                 base64_data = base64.b64encode(f.read()).decode('utf-8')
                 upload_data["data"]["fileBase64"] = base64_data
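
For reference, the upload path that this flag gates reads the finished PDF, base64-encodes it, and posts it to the platform. The sketch below assumes that upload_url and upload_headers follow the shapes declared in global_config and that the auth token has already been injected into the headers; the payload schema and the upload_report helper name are illustrative, not the repository's actual code.

    import base64
    import os
    import requests

    def upload_report(config, reportname, upload_data):
        """Hypothetical sketch: push the generated PDF report to the platform."""
        with open(os.path.join(config.dataset, reportname), 'rb') as f:
            upload_data["data"]["fileBase64"] = base64.b64encode(f.read()).decode('utf-8')
        resp = requests.post(config.upload_url, headers=config.upload_headers,
                             json=upload_data, timeout=30)
        resp.raise_for_status()  # surface HTTP errors instead of failing silently
        config.logger.info(f'report upload response: {resp.text}')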