Upload early-warning information to the Beijing environment

workpc 2024-12-03 14:56:06 +08:00
parent c5d9687f4e
commit 08c10b19b9
5 changed files with 22744 additions and 11040 deletions

View File

@@ -197,8 +197,8 @@ warning_data = {
### Switches
is_train = True # whether to train
-is_debug = True # whether to debug
-is_eta = True # whether to use the eta API
+is_debug = False # whether to debug
+is_eta = False # whether to use the eta API
is_timefurture = True # whether to use time features
is_fivemodels = False # whether to reuse the best 5 previously saved models
is_edbcode = True # features come from the edbcoding list
@@ -211,13 +211,13 @@ is_update_warning_data = False # whether to upload warning data
# url: jdbc:mysql://192.168.101.27:3306/jingbo_test?autoReconnect=true&useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&transformedBitIsBoolean=true&useSSL=false&serverTimezone=GMT%2B8&nullCatalogMeansCurrent=true
host = '192.168.101.27'
-prot = 3306
-username ='root'
+port = 3306
+dbusername ='root'
password = '123456'
dbname = 'jingbo_test'
table_name = 'v_tbl_crude_oil_warning'
-db_mysql = MySQLDB(host=host, user=username, password=password, database=dbname)
+db_mysql = MySQLDB(host=host, user=dbusername, password=password, database=dbname)
# connect to the database
db_mysql.connect()
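
The corrected names (port instead of the misspelled prot, dbusername instead of username) are the ones the new upload code in predict_main() interpolates into its SQLAlchemy URL. A minimal sketch of that usage, assuming the pymysql driver is installed; the connection values are the ones defined in this config:

from sqlalchemy import create_engine, text

host = '192.168.101.27'
port = 3306
dbusername = 'root'
password = '123456'
dbname = 'jingbo_test'

# Same URL format as the predict_main() hunk further below.
engine = create_engine(f'mysql+pymysql://{dbusername}:{password}@{host}:{port}/{dbname}')

# Quick connectivity check before any upload.
with engine.connect() as conn:
    print(conn.execute(text('SELECT 1')).scalar())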

View File

@@ -718,7 +718,7 @@ def getdata(filename, datecol='date',y='y',dataset='',add_kdj=False,is_timefurtu
# convert the date strings to datetime
df = datachuli(df_zhibiaoshuju,df_zhibiaoliebiao,datecol,y = y,dataset=dataset,add_kdj=add_kdj,is_timefurture=is_timefurture,end_time=end_time)
-return df
+return df,df_zhibiaoliebiao
def getdata_juxiting(filename, datecol='date',y='y',dataset='',add_kdj=False,is_timefurture=False,end_time=''):
logger.info('getdata接收'+filename+' '+datecol+' '+end_time)
# determine the file extension: csv or excel
@@ -1147,7 +1147,7 @@ class EtaReader():
'''
# build the new DataFrames df and df1
-df = pd.DataFrame(columns=['指标分类', '指标名称', '指标id', '频度','指标来源','来源id','最后更新时间','更新周期','预警日期'])
+df = pd.DataFrame(columns=['指标分类', '指标名称', '指标id', '频度','指标来源','来源id','最后更新时间','更新周期','预警日期','停更周期'])
df1 = pd.DataFrame(columns=['DataTime'])
@@ -1231,14 +1231,18 @@ class EtaReader():
most_common_weekday = weekday_counter.most_common(1)[0][0]
# compute the date two weeks out
warning_date = (datetime.datetime.strptime(time_sequence[-1], "%Y-%m-%d") + datetime.timedelta(weeks=2)).strftime("%Y-%m-%d")
+stop_update_period = (datetime.datetime.strptime(today, "%Y-%m-%d") - datetime.datetime.strptime(time_sequence[-1], "%Y-%m-%d")).days // 7
except IndexError:
most_common_weekday = '其他'
+stop_update_period = 0
if '' in Frequency:
most_common_weekday = '每天'
warning_date = (datetime.datetime.strptime(time_sequence[-1], "%Y-%m-%d") + datetime.timedelta(days=3)).strftime("%Y-%m-%d")
+stop_update_period = (datetime.datetime.strptime(today, "%Y-%m-%d") - datetime.datetime.strptime(time_sequence[-1], "%Y-%m-%d")).days
# save frequency, indicator name, classification, and indicator id into df
-df2 = pd.DataFrame({'指标分类': ClassifyName, '指标名称': EdbName, '指标id': EdbCode, '频度': Frequency,'指标来源':SourceName,'来源id':Source,'最后更新时间':edbname_df['DataTime'].values[-1],'更新周期':most_common_weekday,'预警日期':warning_date},index=[0])
+df2 = pd.DataFrame({'指标分类': ClassifyName, '指标名称': EdbName, '指标id': EdbCode, '频度': Frequency,'指标来源':SourceName,'来源id':Source,'最后更新时间':edbname_df['DataTime'].values[-1],'更新周期':most_common_weekday,'预警日期':warning_date,'停更周期':stop_update_period},index=[0])
# df = pd.merge(df, df2, how='outer')
df = pd.concat([df, df2])
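
The new 停更周期 (stop-update period) is simply the gap between today and the indicator's last data point, counted in whole weeks for non-daily series and in days for daily ones. A small standalone sketch of that arithmetic, with an illustrative helper name and date strings in the same %Y-%m-%d format used above:

import datetime

def stop_update_period(today: str, last_datatime: str, daily: bool = False) -> int:
    # Days elapsed since the series' last data point.
    gap_days = (datetime.datetime.strptime(today, "%Y-%m-%d")
                - datetime.datetime.strptime(last_datatime, "%Y-%m-%d")).days
    # Daily indicators are measured in days, everything else in whole weeks.
    return gap_days if daily else gap_days // 7

# A weekly indicator last updated 2024-11-01, checked on 2024-12-03: 4 stale weeks.
print(stop_update_period("2024-12-03", "2024-11-01"))  # 4
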
@@ -1272,7 +1276,6 @@ class EtaReader():
with pd.ExcelWriter(os.path.join(dataset,data_set)) as file:
df1.to_excel(file, sheet_name='指标数据', index=False)
df.to_excel(file, sheet_name='指标列表', index=False)
df_zhibiaoshuju = df1.copy()
df_zhibiaoliebiao = df.copy()
return df_zhibiaoshuju,df_zhibiaoliebiao
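
Both frames now round-trip through the same workbook: '指标数据' holds the raw series and '指标列表' carries the indicator list, including the new 停更周期 column that getdata() later hands back to predict_main(). A minimal read-back sketch with pandas, assuming an .xlsx workbook at an illustrative path (openpyxl required):

import os
import pandas as pd

# Illustrative values; the real path comes from the project's dataset/data_set config.
dataset, data_set = 'dataset', 'indicators.xlsx'
path = os.path.join(dataset, data_set)

df_zhibiaoshuju = pd.read_excel(path, sheet_name='指标数据')    # indicator data
df_zhibiaoliebiao = pd.read_excel(path, sheet_name='指标列表')  # indicator list, incl. 停更周期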

File diff suppressed because it is too large

11069  logs/pricepredict.log.2 Normal file

File diff suppressed because it is too large

View File

@@ -81,7 +81,7 @@ def predict_main():
else:
# read the data
logger.info('读取本地数据:' + os.path.join(dataset, data_set))
-df = getdata(filename=os.path.join(dataset, data_set), y=y, dataset=dataset, add_kdj=add_kdj,
+df,df_zhibiaoliebiao = getdata(filename=os.path.join(dataset, data_set), y=y, dataset=dataset, add_kdj=add_kdj,
is_timefurture=is_timefurture, end_time=end_time) # raw data, not yet processed
# rename the prediction column
@@ -139,6 +139,26 @@ def predict_main():
sqlitedb.create_table('most_model', columns="ds datetime, most_common_model TEXT")
sqlitedb.insert_data('most_model', (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), most_common_model,), columns=('ds', 'most_common_model',))
if is_weekday:
logger.info('今天是周一,更新预测模型')
+# upload the warning information to the database
+warning_data_df = df_zhibiaoliebiao.copy()
+warning_data_df = warning_data_df[warning_data_df['停更周期']> 3 ][['指标名称', '指标id', '频度','更新周期','指标来源','最后更新时间','停更周期']]
+# rename the columns
+warning_data_df = warning_data_df.rename(columns={'指标名称': 'INDICATOR_NAME', '指标id': 'INDICATOR_ID', '频度': 'FREQUENCY', '更新周期': 'UPDATE_FREQUENCY', '指标来源': 'DATA_SOURCE', '最后更新时间': 'LAST_UPDATE_DATE', '停更周期': 'UPDATE_SUSPENSION_CYCLE'})
+from sqlalchemy import create_engine
+engine = create_engine(f'mysql+pymysql://{dbusername}:{password}@{host}:{port}/{dbname}')
+warning_data_df['WARNING_DATE'] = datetime.date.today().strftime("%Y-%m-%d")
+# query the existing rows before inserting, then add an id column
+existing_data = pd.read_sql(f"SELECT * FROM {table_name}", engine)
+if not existing_data.empty:
+max_id = existing_data['id'].max()
+warning_data_df['id'] = range(max_id + 1, max_id + 1 + len(warning_data_df))
+else:
+warning_data_df['id'] = range(1, 1 + len(warning_data_df))
+warning_data_df.to_sql(table_name, con=engine, if_exists='append', index=False)
if is_corr:
df = corr_feature(df=df)
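
Condensed into one function, the upload path added above looks roughly like the sketch below. It assumes the connection values from the config file, a frame that already carries the 停更周期 column, and it keeps the diff's manual id numbering, which presumes no concurrent writers on the table; the wrapper name is illustrative only.

import datetime
import pandas as pd
from sqlalchemy import create_engine

def upload_warning_data(df_zhibiaoliebiao, host='192.168.101.27', port=3306,
                        dbusername='root', password='123456',
                        dbname='jingbo_test', table_name='v_tbl_crude_oil_warning'):
    # Keep only indicators whose stop-update period exceeds 3 cycles.
    cols = ['指标名称', '指标id', '频度', '更新周期', '指标来源', '最后更新时间', '停更周期']
    warning = df_zhibiaoliebiao.loc[df_zhibiaoliebiao['停更周期'] > 3, cols].copy()
    warning = warning.rename(columns={
        '指标名称': 'INDICATOR_NAME', '指标id': 'INDICATOR_ID', '频度': 'FREQUENCY',
        '更新周期': 'UPDATE_FREQUENCY', '指标来源': 'DATA_SOURCE',
        '最后更新时间': 'LAST_UPDATE_DATE', '停更周期': 'UPDATE_SUSPENSION_CYCLE'})
    warning['WARNING_DATE'] = datetime.date.today().strftime('%Y-%m-%d')

    engine = create_engine(f'mysql+pymysql://{dbusername}:{password}@{host}:{port}/{dbname}')
    # Continue the id sequence by hand, exactly as the diff does.
    existing = pd.read_sql(f'SELECT id FROM {table_name}', engine)
    next_id = int(existing['id'].max()) + 1 if not existing.empty else 1
    warning['id'] = range(next_id, next_id + len(warning))
    warning.to_sql(table_name, con=engine, if_exists='append', index=False)
    return len(warning)

If the table's id column were declared AUTO_INCREMENT, the read-before-insert step could be dropped entirely.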