Update LPG data through 2.28; add polyolefin data fetching
parent 29a65d7c70 · commit d4187e8c1e
File diff suppressed because it is too large
Binary file not shown.
@@ -159,7 +159,7 @@ table_name = 'v_tbl_crude_oil_warning'
 # switches
 is_train = False  # whether to train
 is_debug = False  # whether to run in debug mode
-is_eta = False  # whether to use the eta API
+is_eta = True  # whether to use the eta API
 is_market = True  # whether to pull features from the market information platform; only takes effect when is_eta is True
 is_timefurture = True  # whether to use time features
 is_fivemodels = False  # whether to reuse the 5 best previously saved models
@@ -106,7 +106,6 @@ modelsindex = {
 }
 
 
-
 # request body for pushing prediction results to eta; model and datalist are updated before each request
 data = {
     "IndexCode": "",
@@ -132,8 +131,7 @@ data = {
 ClassifyId = 1161
 
 
-
-############################################################################################################### variable definitions -- test environment
+# variable definitions -- test environment
 server_host = '192.168.100.53'
 
 login_pushreport_url = f"http://{server_host}:8080/jingbo-dev/api/server/login"
@@ -202,10 +200,10 @@ dbname = 'jingbo_test'
 table_name = 'v_tbl_crude_oil_warning'
 
 
-### switches
+# switches
 is_train = False  # whether to train
 is_debug = True  # whether to run in debug mode
-is_eta = False  # whether to use the eta API
+is_eta = True  # whether to use the eta API
 is_market = False  # whether to pull features from the market information platform; only takes effect when is_eta is True
 is_timefurture = True  # whether to use time features
 is_fivemodels = False  # whether to reuse the 5 best previously saved models
@@ -218,16 +216,16 @@ is_del_corr = 0.6  # drop highly correlated features; 0-1, where 0 means keep everything
 is_del_tow_month = True  # drop features that have not updated for two months
 
 
-
 # connect to the database
-db_mysql = MySQLDB(host=host, user=dbusername, password=password, database=dbname)
+db_mysql = MySQLDB(host=host, user=dbusername,
+                   password=password, database=dbname)
 db_mysql.connect()
 print("数据库连接成功", host, dbname, dbusername)
 
 
 # data cutoff
 start_year = 2020  # first year of data
-end_time = '2025-01-27'  # cutoff date
+end_time = ''  # cutoff date
 freq = 'B'  # time frequency: "D" day, "W" week, "M" month, "Q" quarter, "A" year, "H" hour, "T" minute, "S" second, "B" business day
 delweekenday = True if freq == 'B' else False  # drop weekend rows when the frequency is business-daily
 is_corr = False  # whether features take part in the lead-lag correlation boost
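For reference, since several downstream steps key off `freq`: a minimal pandas sketch (illustrative dates) of what the business-day grid implies, and why `delweekenday` is tied to `freq == 'B'`.

```python
# Business-day ('B') date grids already exclude Saturdays and Sundays,
# which is why delweekenday is only switched on for that frequency.
import pandas as pd

grid = pd.date_range('2025-01-24', '2025-01-28', freq='B')
print(list(grid.strftime('%Y-%m-%d %a')))
# ['2025-01-24 Fri', '2025-01-27 Mon', '2025-01-28 Tue'] -- the weekend is skipped
```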
@@ -235,7 +233,7 @@ add_kdj = False  # whether to add the KDJ indicator
 if add_kdj and is_edbnamelist:
     edbnamelist = edbnamelist+['K', 'D', 'J']
 
-### model parameters
+# model parameters
 y = 'AVG-金能大唐久泰青州'
 avg_cols = [
     'PP:拉丝:1102K:出厂价:青州:国家能源宁煤(日)',
@@ -254,16 +252,16 @@ early_stop_patience_steps = 5  # early-stopping patience, in steps
 test_size = 200  # test set size; placeholder, reassigned before use
 val_size = test_size  # validation set size, same as the test set
 
-### feature-selection parameters
+# feature-selection parameters
 k = 100  # number of features to keep; 0, or any value larger than the feature count, means all features
 corr_threshold = 0.6  # keep features whose correlation exceeds 0.6
 rote = 0.06  # upper/lower bound threshold for plotting
 
-### accuracy computation
+# accuracy computation
 weight_dict = [0.4, 0.15, 0.1, 0.1, 0.25]  # weights
 
 
-### files
+# files
 data_set = 'PP指标数据.xlsx'  # dataset file
 dataset = 'juxitingdataset'  # dataset folder
 
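A hedged sketch of the correlation filter these parameters drive (column names invented; the project's own implementation lives in lib.dataread and may differ in detail):

```python
# Drop one column out of every pair whose absolute correlation exceeds the
# threshold, keeping the first occurrence -- the idea behind corr_threshold=0.6.
import numpy as np
import pandas as pd

def drop_correlated(df: pd.DataFrame, threshold: float = 0.6) -> pd.DataFrame:
    corr = df.corr().abs()
    # upper triangle so each pair is inspected exactly once
    upper = corr.where(np.triu(np.ones(corr.shape, dtype=bool), k=1))
    to_drop = [col for col in upper.columns if (upper[col] > threshold).any()]
    return df.drop(columns=to_drop)

rng = np.random.default_rng(0)
a = rng.normal(size=200)
df = pd.DataFrame({'f1': a,
                   'f2': a + rng.normal(scale=0.1, size=200),  # near-copy of f1
                   'f3': rng.normal(size=200)})
print(drop_correlated(df).columns.tolist())  # ['f1', 'f3'] -- f2 gets dropped
```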
@@ -280,7 +278,7 @@ reportname = f'PP大模型预测报告--{end_time}.pdf'  # report file name
 reportname = reportname.replace(':', '-')  # replace colons
 if end_time == '':
     end_time = now
-### email settings
+# email settings
 username = '1321340118@qq.com'
 passwd = 'wgczgyhtyyyyjghi'
 # recv=['liurui_test@163.com','52585119@qq.com']
@@ -293,7 +291,7 @@ file=os.path.join(dataset,'reportname')
 ssl = True
 
 
-### logging settings
+# logging settings
 
 # create the log directory if it does not exist
 log_dir = 'logs'
@@ -305,8 +303,10 @@ logger = logging.getLogger('my_logger')
 logger.setLevel(logging.INFO)
 
 # file handler: write records to a rotating log file
-file_handler = logging.handlers.RotatingFileHandler(os.path.join(log_dir, 'pricepredict.log'), maxBytes=1024 * 1024, backupCount=5)
-file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
+file_handler = logging.handlers.RotatingFileHandler(os.path.join(
+    log_dir, 'pricepredict.log'), maxBytes=1024 * 1024, backupCount=5)
+file_handler.setFormatter(logging.Formatter(
+    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
 
 # console handler: print records to the console
 console_handler = logging.StreamHandler()
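Pulled out of the diff for readability: the logging configuration above, as one self-contained, runnable snippet (standard library only; the hunk itself only re-wraps these calls).

```python
import logging
import logging.handlers
import os

log_dir = 'logs'
os.makedirs(log_dir, exist_ok=True)

logger = logging.getLogger('my_logger')
logger.setLevel(logging.INFO)

# rotate at 1 MiB, keep 5 old files: pricepredict.log.1 ... pricepredict.log.5
file_handler = logging.handlers.RotatingFileHandler(
    os.path.join(log_dir, 'pricepredict.log'), maxBytes=1024 * 1024, backupCount=5)
file_handler.setFormatter(logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))

console_handler = logging.StreamHandler()
logger.addHandler(file_handler)
logger.addHandler(console_handler)
logger.info('logging configured')
```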
@@ -317,4 +317,3 @@ logger.addHandler(file_handler)
 logger.addHandler(console_handler)
 
 # logger.info('当前配置:'+settings)
-
@@ -103,6 +103,14 @@ global_config = {
     # ETA settings
     'APPID': None,
     'SECRET': None,
+    'classifylisturl': None,
+    'classifyidlisturl': None,
+    'edbcodedataurl': None,
+    'edbcodelist': None,
+    'edbdatapushurl': None,
+    'edbdeleteurl': None,
+    'edbbusinessurl': None,
+    'ClassifyId': None,
 
     # database settings
     'sqlitedb': None,
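The None placeholders above implement a simple registry: the entry script fills the dict at startup and library code reads it later. A minimal sketch of the pattern (the `require` helper is hypothetical, not part of the codebase):

```python
# Module-level registry of None placeholders, populated once at startup.
global_config = {
    'APPID': None,
    'SECRET': None,
    'ClassifyId': None,
}

def require(key: str):
    """Hypothetical guard: fail loudly if a key was never initialised."""
    value = global_config.get(key)
    if value is None:
        raise RuntimeError(f'global_config[{key!r}] was never initialised')
    return value

# entry script, at startup:
global_config.update({'APPID': 'demo', 'SECRET': 'demo', 'ClassifyId': 1161})
print(require('ClassifyId'))  # 1161
```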
@@ -1352,7 +1360,7 @@ def style_row(row):
 
 
 class EtaReader():
-    def __init__(self, signature, classifylisturl, classifyidlisturl, edbcodedataurl, edbcodelist, edbdatapushurl, edbdeleteurl, edbbusinessurl):
+    def __init__(self, signature, classifylisturl, classifyidlisturl, edbcodedataurl, edbcodelist, edbdatapushurl, edbdeleteurl, edbbusinessurl, classifyId):
         '''
         Initialize an EtaReader instance.
 
@@ -1377,6 +1385,7 @@ class EtaReader():
         self.edbcodelist = edbcodelist
         self.edbdeleteurl = edbdeleteurl
         self.edbbusinessurl = edbbusinessurl
+        self.classifyId = classifyId
 
     def filter_yuanyou_data(self, ClassifyName, data):
         '''
@@ -1779,7 +1788,6 @@ class EtaReader():
         return df_zhibiaoshuju, df_zhibiaoliebiao
 
     def get_eta_api_pp_data(self, data_set, dataset=''):
-        global ClassifyId
         today = datetime.date.today().strftime("%Y-%m-%d")
 
         # define your headers; multiple parameters can be included here
@@ -1818,7 +1826,7 @@ class EtaReader():
             # request succeeded; process the response body
             # config.logger.info(data.get('Data'))
             # the fixed value whose items we want to keep
-            fixed_value = ClassifyId
+            fixed_value = self.classifyId
 
             # walk the list, keeping only items whose 'category' key equals the fixed value
             filtered_data = [item for item in data.get(
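The classifyId change is the key refactor in this file: the classify id used to be read from a module-level global; it is now injected through the constructor. A trimmed-down, hypothetical illustration of why that helps (class and ids invented for the example):

```python
# With injection, two readers can hold different ids at the same time, and the
# class no longer depends on import-time module state.
class EtaReader:
    def __init__(self, signature: str, classifyId: int):
        self.signature = signature
        self.classifyId = classifyId  # was: global ClassifyId

    def keep_my_classify(self, items: list[dict]) -> list[dict]:
        return [it for it in items if it.get('ClassifyId') == self.classifyId]

pp = EtaReader('sig', 1161)
crude = EtaReader('sig', 1214)  # a second instance with its own id -- impossible with one global
print(pp.keep_my_classify([{'ClassifyId': 1161}, {'ClassifyId': 1214}]))
# [{'ClassifyId': 1161}]
```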
main_juxiting.py (314 changed lines)
@@ -1,12 +1,75 @@
 # read the configuration
 from lib.dataread import *
-from lib.tools import SendMail,exception_logger
-from models.nerulforcastmodels import ex_Model_Juxiting,model_losss,model_losss_juxiting,brent_export_pdf,tansuanli_export_pdf,pp_export_pdf,model_losss_juxiting
-
-import glob
+from config_juxiting import *
+from lib.tools import SendMail, exception_logger
+from models.nerulforcastmodels import ex_Model, model_losss, model_losss_juxiting, brent_export_pdf, tansuanli_export_pdf, pp_export_pdf, model_losss_juxiting
 import datetime
 import torch
 torch.set_float32_matmul_precision("high")
 
+global_config.update({
+    # core parameters
+    'logger': logger,
+    'dataset': dataset,
+    'y': y,
+    'is_debug': is_debug,
+    'is_train': is_train,
+    'is_fivemodels': is_fivemodels,
+    'settings': settings,
+
+    # model parameters
+    'data_set': data_set,
+    'input_size': input_size,
+    'horizon': horizon,
+    'train_steps': train_steps,
+    'val_check_steps': val_check_steps,
+    'val_size': val_size,
+    'test_size': test_size,
+    'modelsindex': modelsindex,
+    'rote': rote,
+
+    # feature-engineering switches
+    'is_del_corr': is_del_corr,
+    'is_del_tow_month': is_del_tow_month,
+    'is_eta': is_eta,
+    'is_update_eta': is_update_eta,
+    'early_stop_patience_steps': early_stop_patience_steps,
+
+    # time parameters
+    'start_year': start_year,
+    'end_time': end_time or datetime.datetime.now().strftime("%Y-%m-%d"),
+    'freq': freq,  # keep the list structure
+
+    # API endpoints
+    'login_pushreport_url': login_pushreport_url,
+    'login_data': login_data,
+    'upload_url': upload_url,
+    'upload_warning_url': upload_warning_url,
+    'warning_data': warning_data,
+
+    # query endpoints
+    'query_data_list_item_nos_url': query_data_list_item_nos_url,
+    'query_data_list_item_nos_data': query_data_list_item_nos_data,
+
+    # eta settings
+    'APPID': APPID,
+    'SECRET': SECRET,
+    'etadata': data,
+    'edbcodelist': edbcodelist,
+    'ClassifyId': ClassifyId,
+    'edbcodedataurl': edbcodedataurl,
+    'classifyidlisturl': classifyidlisturl,
+    'edbdatapushurl': edbdatapushurl,
+    'edbdeleteurl': edbdeleteurl,
+    'edbbusinessurl': edbbusinessurl,
+    'classifylisturl': classifylisturl,
+
+    # database settings
+    'sqlitedb': sqlitedb,
+})
 
 
 def predict_main():
@@ -48,31 +111,23 @@ def predict_main():
     Returns:
     None
     """
-    global end_time
-    signature = BinanceAPI(APPID, SECRET)
-    etadata = EtaReader(signature=signature,
-                        classifylisturl=classifylisturl,
-                        classifyidlisturl=classifyidlisturl,
-                        edbcodedataurl=edbcodedataurl,
-                        edbcodelist=edbcodelist,
-                        edbdatapushurl=edbdatapushurl,
-                        edbdeleteurl=edbdeleteurl,
-                        edbbusinessurl=edbbusinessurl
-                        )
+    end_time = global_config['end_time']
     # fetch the data
     if is_eta:
         logger.info('从eta获取数据...')
         signature = BinanceAPI(APPID, SECRET)
         etadata = EtaReader(signature=signature,
-                            classifylisturl=classifylisturl,
-                            classifyidlisturl=classifyidlisturl,
-                            edbcodedataurl=edbcodedataurl,
-                            edbcodelist=edbcodelist,
-                            edbdatapushurl=edbdatapushurl,
-                            edbdeleteurl=edbdeleteurl,
-                            edbbusinessurl=edbbusinessurl,
+                            classifylisturl=global_config['classifylisturl'],
+                            classifyidlisturl=global_config['classifyidlisturl'],
+                            edbcodedataurl=global_config['edbcodedataurl'],
+                            edbcodelist=global_config['edbcodelist'],
+                            edbdatapushurl=global_config['edbdatapushurl'],
+                            edbdeleteurl=global_config['edbdeleteurl'],
+                            edbbusinessurl=global_config['edbbusinessurl'],
+                            classifyId=global_config['ClassifyId'],
                             )
-        df_zhibiaoshuju, df_zhibiaoliebiao = etadata.get_eta_api_pp_data(data_set=data_set, dataset=dataset)  # raw data, unprocessed
+        df_zhibiaoshuju, df_zhibiaoliebiao = etadata.get_eta_api_pp_data(
+            data_set=data_set, dataset=dataset)  # raw data, unprocessed
 
         if is_market:
             logger.info('从市场信息平台获取数据...')
@@ -83,7 +138,8 @@ def predict_main():
                 df_zhibiaoshuju = get_high_low_data(df_zhibiaoshuju)
             else:
                 logger.info('从市场信息平台获取数据')
-                df_zhibiaoshuju = get_market_data(end_time,df_zhibiaoshuju)
+                df_zhibiaoshuju = get_market_data(
+                    end_time, df_zhibiaoshuju)
 
         except:
             logger.info('最高最低价拼接失败')
@@ -93,15 +149,14 @@ def predict_main():
         df_zhibiaoshuju.to_excel(file, sheet_name='指标数据', index=False)
         df_zhibiaoliebiao.to_excel(file, sheet_name='指标列表', index=False)
 
-
         # process the data
-        df = datachuli_juxiting(df_zhibiaoshuju, df_zhibiaoliebiao, y=y, dataset=dataset, add_kdj=add_kdj, is_timefurture=is_timefurture,
+        df = datachuli(df_zhibiaoshuju, df_zhibiaoliebiao, y=y, dataset=dataset, add_kdj=add_kdj, is_timefurture=is_timefurture,
                        end_time=end_time)
 
     else:
         # read local data
         logger.info('读取本地数据:' + os.path.join(dataset, data_set))
-        df,df_zhibiaoliebiao = getdata_juxiting(filename=os.path.join(dataset, data_set), y=y, dataset=dataset, add_kdj=add_kdj,
+        df, df_zhibiaoliebiao = getdata(filename=os.path.join(dataset, data_set), y=y, dataset=dataset, add_kdj=add_kdj,
                                         is_timefurture=is_timefurture, end_time=end_time)  # raw data, unprocessed
 
     # rename the prediction column
@@ -124,19 +179,35 @@ def predict_main():
     else:
         for row in first_row.itertuples(index=False):
             row_dict = row._asdict()
-            row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d %H:%M:%S')
-            check_query = sqlitedb.select_data('trueandpredict', where_condition=f"ds = '{row.ds}'")
+            config.logger.info(f'要保存的真实值:{row_dict}')
+            # check whether ds is a string; convert it if not
+            if isinstance(row_dict['ds'], (pd.Timestamp, datetime.datetime)):
+                row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d')
+            elif not isinstance(row_dict['ds'], str):
+                try:
+                    row_dict['ds'] = pd.to_datetime(
+                        row_dict['ds']).strftime('%Y-%m-%d')
+                except:
+                    logger.warning(f"无法解析的时间格式: {row_dict['ds']}")
+            # row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d')
+            # row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d %H:%M:%S')
+            check_query = sqlitedb.select_data(
+                'trueandpredict', where_condition=f"ds = '{row.ds}'")
             if len(check_query) > 0:
-                set_clause = ", ".join([f"{key} = '{value}'" for key, value in row_dict.items()])
-                sqlitedb.update_data('trueandpredict', set_clause, where_condition=f"ds = '{row.ds}'")
+                set_clause = ", ".join(
+                    [f"{key} = '{value}'" for key, value in row_dict.items()])
+                sqlitedb.update_data(
+                    'trueandpredict', set_clause, where_condition=f"ds = '{row.ds}'")
                 continue
-            sqlitedb.insert_data('trueandpredict', tuple(row_dict.values()), columns=row_dict.keys())
+            sqlitedb.insert_data('trueandpredict', tuple(
+                row_dict.values()), columns=row_dict.keys())
 
     # update the y values in the accuracy table
     if not sqlitedb.check_table_exists('accuracy'):
         pass
     else:
-        update_y = sqlitedb.select_data('accuracy',where_condition="y is null")
+        update_y = sqlitedb.select_data(
+            'accuracy', where_condition="y is null")
        if len(update_y) > 0:
             logger.info('更新accuracy表的y值')
             # find rows whose ds is in update_y and whose y exists in df
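The new ds handling is the substantive change in this hunk. Restated as a stand-alone function (a sketch that mirrors the branch structure above):

```python
# Accept a Timestamp, datetime, string, or anything pd.to_datetime can parse,
# and always hand SQLite a '%Y-%m-%d' string.
import datetime
import pandas as pd

def normalise_ds(value) -> str:
    if isinstance(value, (pd.Timestamp, datetime.datetime)):
        return value.strftime('%Y-%m-%d')
    if not isinstance(value, str):
        try:
            value = pd.to_datetime(value).strftime('%Y-%m-%d')
        except Exception:
            print(f'unparseable date: {value!r}')  # the script logs a warning here
    return value

print(normalise_ds(pd.Timestamp('2025-02-28 09:30')))  # 2025-02-28
print(normalise_ds('2025-02-28'))                      # 2025-02-28 (passed through)
```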
@@ -149,22 +220,24 @@ def predict_main():
                     yy = df[df['ds'] == row_dict['ds']]['y'].values[0]
                     LOW = df[df['ds'] == row_dict['ds']]['Brentzdj'].values[0]
                     HIGH = df[df['ds'] == row_dict['ds']]['Brentzgj'].values[0]
-                    sqlitedb.update_data('accuracy', f"y = {yy},LOW_PRICE = {LOW},HIGH_PRICE = {HIGH}", where_condition=f"ds = '{row_dict['ds']}'")
+                    sqlitedb.update_data(
+                        'accuracy', f"y = {yy},LOW_PRICE = {LOW},HIGH_PRICE = {HIGH}", where_condition=f"ds = '{row_dict['ds']}'")
                 except:
                     logger.info(f'更新accuracy表的y值失败:{row_dict}')
                 # except Exception as e:
                 #     logger.info(f'更新accuracy表的y值失败:{e}')
 
-    import datetime
     # is today a Monday?
     is_weekday = datetime.datetime.now().weekday() == 0
     if is_weekday:
         logger.info('今天是周一,更新预测模型')
         # find the model with the lowest prediction residual over the last 60 days
-        model_results = sqlitedb.select_data('trueandpredict', order_by="ds DESC", limit="60")
+        model_results = sqlitedb.select_data(
+            'trueandpredict', order_by="ds DESC", limit="60")
         # drop columns that are more than 90% null
         if len(model_results) > 10:
-            model_results = model_results.dropna(thresh=len(model_results)*0.1,axis=1)
+            model_results = model_results.dropna(
+                thresh=len(model_results)*0.1, axis=1)
         # drop empty rows
         model_results = model_results.dropna()
         modelnames = model_results.columns.to_list()[2:-1]
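On the "90% null" comment above: `dropna`'s `thresh` argument counts required non-null values, so `thresh=len(df)*0.1` keeps a column only if at least 10% of its values are present. A quick demonstration (the float thresh mirrors how the script calls it):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'good': range(10), 'sparse': [1.0] + [np.nan] * 9})

kept = df.dropna(thresh=len(df) * 0.1, axis=1)
print(kept.columns.tolist())  # ['good', 'sparse'] -- one non-null value is exactly 10%

kept = df.dropna(thresh=len(df) * 0.2, axis=1)
print(kept.columns.tolist())  # ['good'] -- 'sparse' is now below the threshold
```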
@@ -172,51 +245,61 @@ def predict_main():
                 model_results[col] = model_results[col].astype(np.float32)
             # compute each model's deviation rate from the true value
             for model in modelnames:
-                model_results[f'{model}_abs_error_rate'] = abs(model_results['y'] - model_results[model]) / model_results['y']
+                model_results[f'{model}_abs_error_rate'] = abs(
+                    model_results['y'] - model_results[model]) / model_results['y']
             # per-row minimum deviation rate
-            min_abs_error_rate_values = model_results.apply(lambda row: row[[f'{model}_abs_error_rate' for model in modelnames]].min(), axis=1)
+            min_abs_error_rate_values = model_results.apply(
+                lambda row: row[[f'{model}_abs_error_rate' for model in modelnames]].min(), axis=1)
             # per-row column name of that minimum
-            min_abs_error_rate_column_name = model_results.apply(lambda row: row[[f'{model}_abs_error_rate' for model in modelnames]].idxmin(), axis=1)
+            min_abs_error_rate_column_name = model_results.apply(
+                lambda row: row[[f'{model}_abs_error_rate' for model in modelnames]].idxmin(), axis=1)
             # map the column label back to the model name
-            min_abs_error_rate_column_name = min_abs_error_rate_column_name.map(lambda x: x.split('_')[0])
+            min_abs_error_rate_column_name = min_abs_error_rate_column_name.map(
+                lambda x: x.split('_')[0])
             # take the most frequently winning model
             most_common_model = min_abs_error_rate_column_name.value_counts().idxmax()
             logger.info(f"最近60天预测残差最低的模型名称:{most_common_model}")
             # save the result to the database
             if not sqlitedb.check_table_exists('most_model'):
-                sqlitedb.create_table('most_model', columns="ds datetime, most_common_model TEXT")
-            sqlitedb.insert_data('most_model', (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), most_common_model,), columns=('ds', 'most_common_model',))
+                sqlitedb.create_table(
+                    'most_model', columns="ds datetime, most_common_model TEXT")
+            sqlitedb.insert_data('most_model', (datetime.datetime.now().strftime(
+                '%Y-%m-%d %H:%M:%S'), most_common_model,), columns=('ds', 'most_common_model',))
 
     try:
-        # if is_weekday:
-        if True:
-            # logger.info('今天是周一,发送特征预警')
-            # # upload the warning info to the database
-            # warning_data_df = df_zhibiaoliebiao.copy()
-            # warning_data_df = warning_data_df[warning_data_df['停更周期']> 3 ][['指标名称', '指标id', '频度','更新周期','指标来源','最后更新时间','停更周期']]
-            # # rename the columns
-            # warning_data_df = warning_data_df.rename(columns={'指标名称': 'INDICATOR_NAME', '指标id': 'INDICATOR_ID', '频度': 'FREQUENCY', '更新周期': 'UPDATE_FREQUENCY', '指标来源': 'DATA_SOURCE', '最后更新时间': 'LAST_UPDATE_DATE', '停更周期': 'UPDATE_SUSPENSION_CYCLE'})
-            # from sqlalchemy import create_engine
-            # import urllib
-            # global password
-            # if '@' in password:
-            #     password = urllib.parse.quote_plus(password)
+        if is_weekday:
+            # if True:
+            logger.info('今天是周一,发送特征预警')
+            # upload the warning info to the database
+            warning_data_df = df_zhibiaoliebiao.copy()
+            warning_data_df = warning_data_df[warning_data_df['停更周期'] > 3][[
+                '指标名称', '指标id', '频度', '更新周期', '指标来源', '最后更新时间', '停更周期']]
+            # rename the columns
+            warning_data_df = warning_data_df.rename(columns={'指标名称': 'INDICATOR_NAME', '指标id': 'INDICATOR_ID', '频度': 'FREQUENCY',
+                                                              '更新周期': 'UPDATE_FREQUENCY', '指标来源': 'DATA_SOURCE', '最后更新时间': 'LAST_UPDATE_DATE', '停更周期': 'UPDATE_SUSPENSION_CYCLE'})
+            from sqlalchemy import create_engine
+            import urllib
+            global password
+            if '@' in password:
+                password = urllib.parse.quote_plus(password)
 
-            # engine = create_engine(f'mysql+pymysql://{dbusername}:{password}@{host}:{port}/{dbname}')
-            # warning_data_df['WARNING_DATE'] = datetime.date.today().strftime("%Y-%m-%d %H:%M:%S")
-            # warning_data_df['TENANT_CODE'] = 'T0004'
-            # # query the table before inserting, then assign new IDs
-            # existing_data = pd.read_sql(f"SELECT * FROM {table_name}", engine)
-            # if not existing_data.empty:
-            #     max_id = existing_data['ID'].astype(int).max()
-            #     warning_data_df['ID'] = range(max_id + 1, max_id + 1 + len(warning_data_df))
-            # else:
-            #     warning_data_df['ID'] = range(1, 1 + len(warning_data_df))
-            # warning_data_df.to_sql(table_name, con=engine, if_exists='append', index=False)
-            # if is_update_warning_data:
-            #     upload_warning_info(len(warning_data_df))
+            engine = create_engine(
+                f'mysql+pymysql://{dbusername}:{password}@{host}:{port}/{dbname}')
+            warning_data_df['WARNING_DATE'] = datetime.date.today().strftime(
+                "%Y-%m-%d %H:%M:%S")
+            warning_data_df['TENANT_CODE'] = 'T0004'
+            # query the table before inserting, then assign new IDs
+            existing_data = pd.read_sql(f"SELECT * FROM {table_name}", engine)
+            if not existing_data.empty:
+                max_id = existing_data['ID'].astype(int).max()
+                warning_data_df['ID'] = range(
+                    max_id + 1, max_id + 1 + len(warning_data_df))
+            else:
+                warning_data_df['ID'] = range(1, 1 + len(warning_data_df))
+            warning_data_df.to_sql(
+                table_name, con=engine, if_exists='append', index=False)
+            if is_update_warning_data:
-            upload_warning_info(10)
+                upload_warning_info(len(warning_data_df))
     except:
         logger.info('上传预警信息到数据库失败')
 
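A toy version of the model-selection logic in this hunk (model names invented; the real frame comes from the trueandpredict table). It also shows that the per-row apply can be collapsed into a single idxmin(axis=1):

```python
# Per-row absolute error rates, per-row winner via idxmin, then the most
# frequent winner over the whole window.
import pandas as pd

df = pd.DataFrame({
    'y':       [10.0, 20.0, 30.0],
    'NHITS':   [11.0, 19.0, 33.0],
    'TSMixer': [14.0, 26.0, 31.0],
})
modelnames = ['NHITS', 'TSMixer']
for model in modelnames:
    df[f'{model}_abs_error_rate'] = abs(df['y'] - df[model]) / df['y']

rate_cols = [f'{m}_abs_error_rate' for m in modelnames]
winner_per_row = df[rate_cols].idxmin(axis=1).map(lambda c: c.split('_')[0])
print(winner_per_row.value_counts().idxmax())  # NHITS -- it wins two of three rows
```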
@@ -228,44 +311,43 @@ def predict_main():
     row, col = df.shape
 
     now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
-    # ex_Model_Juxiting(df,
-    #                   horizon=horizon,
-    #                   input_size=input_size,
-    #                   train_steps=train_steps,
-    #                   val_check_steps=val_check_steps,
-    #                   early_stop_patience_steps=early_stop_patience_steps,
-    #                   is_debug=is_debug,
-    #                   dataset=dataset,
-    #                   is_train=is_train,
-    #                   is_fivemodels=is_fivemodels,
-    #                   val_size=val_size,
-    #                   test_size=test_size,
-    #                   settings=settings,
-    #                   now=now,
-    #                   etadata=etadata,
-    #                   modelsindex=modelsindex,
-    #                   data=data,
-    #                   is_eta=is_eta,
-    #                   end_time=end_time,
-    #                   )
-
+    ex_Model(df,
+             horizon=global_config['horizon'],
+             input_size=global_config['input_size'],
+             train_steps=global_config['train_steps'],
+             val_check_steps=global_config['val_check_steps'],
+             early_stop_patience_steps=global_config['early_stop_patience_steps'],
+             is_debug=global_config['is_debug'],
+             dataset=global_config['dataset'],
+             is_train=global_config['is_train'],
+             is_fivemodels=global_config['is_fivemodels'],
+             val_size=global_config['val_size'],
+             test_size=global_config['test_size'],
+             settings=global_config['settings'],
+             now=now,
+             etadata=global_config['etadata'],
+             modelsindex=global_config['modelsindex'],
+             data=data,
+             is_eta=global_config['is_eta'],
+             end_time=global_config['end_time'],
+             )
 
     logger.info('模型训练完成')
 
     logger.info('训练数据绘图ing')
-    model_results3 = model_losss_juxiting(sqlitedb)
+    model_results3 = model_losss(sqlitedb, end_time=end_time)
     logger.info('训练数据绘图end')
 
     # model report
     logger.info('制作报告ing')
     title = f'{settings}--{end_time}-预测报告'  # report title
     reportname = f'PP大模型预测报告--{end_time}.pdf'  # report file name
     reportname = reportname.replace(':', '-')  # replace colons
-    pp_export_pdf(dataset=dataset,num_models = 5 if is_fivemodels else 22,time=end_time,
-                  reportname=reportname,sqlitedb=sqlitedb),
+    # # model report
+    # logger.info('制作报告ing')
+    # title = f'{settings}--{end_time}-预测报告'  # report title
+    # reportname = f'Brent原油大模型月度预测--{end_time}.pdf'  # report file name
+    # reportname = reportname.replace(':', '-')  # replace colons
+    # brent_export_pdf(dataset=dataset, num_models=5 if is_fivemodels else 22, time=end_time,
+    #                  reportname=reportname, sqlitedb=sqlitedb),
 
-    logger.info('制作报告end')
-    logger.info('模型训练完成')
+    # logger.info('制作报告end')
+    # logger.info('模型训练完成')
 
     # # LSTM univariate model
     # ex_Lstm(df,input_seq_len=input_size,output_seq_len=horizon,is_debug=is_debug,dataset=dataset)
@@ -277,27 +359,27 @@ def predict_main():
     # # ex_GRU(df)
 
     # send the email
-    m = SendMail(
-        username=username,
-        passwd=passwd,
-        recv=recv,
-        title=title,
-        content=content,
-        file=max(glob.glob(os.path.join(dataset,'*.pdf')), key=os.path.getctime),
-        ssl=ssl,
-    )
+    # m = SendMail(
+    #     username=username,
+    #     passwd=passwd,
+    #     recv=recv,
+    #     title=title,
+    #     content=content,
+    #     file=max(glob.glob(os.path.join(dataset,'*.pdf')), key=os.path.getctime),
+    #     ssl=ssl,
+    # )
     # m.send_mail()
 
 
 if __name__ == '__main__':
     # global end_time
-    # is_on = True
-    # # iterate over the business days between 2024-11-25 and 2024-12-3
-    # for i_time in pd.date_range('2025-1-20', '2025-2-6', freq='B'):
-    #     end_time = i_time.strftime('%Y-%m-%d')
+    # iterate over the business days between 2024-11-25 and 2024-12-3
+    # for i_time in pd.date_range('2022-1-1', '2025-3-26', freq='M'):
     #     try:
+    #         global_config['end_time'] = i_time.strftime('%Y-%m-%d')
     #         predict_main()
-    #     except:
-    #         pass
+    #     except Exception as e:
+    #         logger.info(f'预测失败:{e}')
+    #         continue
 
     predict_main()
@@ -374,13 +374,12 @@ def predict_main():
 if __name__ == '__main__':
     # global end_time
     # iterate over the business days between 2024-11-25 and 2024-12-3
-    for i_time in pd.date_range('2022-1-1', '2025-3-26', freq='M'):
-        try:
-            global_config['end_time'] = i_time.strftime('%Y-%m-%d')
-            predict_main()
-        except Exception as e:
-            logger.info(f'预测失败:{e}')
-            continue
-
+    # for i_time in pd.date_range('2022-1-1', '2025-3-26', freq='M'):
+    #     try:
+    #         global_config['end_time'] = i_time.strftime('%Y-%m-%d')
+    #         predict_main()
+    #     except Exception as e:
+    #         logger.info(f'预测失败:{e}')
+    #         continue
 
     predict_main()