Polyolefin monthly model execution update
parent 3b51565262
commit ff7b9bf235
@@ -557,8 +557,7 @@ if __name__ == '__main__':
     #     except Exception as e:
     #         logger.info(f'预测失败:{e}')
     #         continue
-    global_config['end_time'] = '2025-08-01'
-    global_config['db_mysql'].connect()
+    global_config['end_time'] = '2025-08-04'
     predict_main()

     # global_config['end_time'] = '2025-08-01'
@@ -3,7 +3,7 @@
 from lib.dataread import *
 from config_juxiting_yuedu import *
 from lib.tools import SendMail, convert_df_to_pydantic_pp, exception_logger, find_best_models, get_modelsname
-from models.nerulforcastmodels import ex_Model, model_losss_juxiting, pp_bdwd_png, pp_export_pdf
+from models.nerulforcastmodels import ex_Model_Juxiting, model_losss_juxiting, pp_bdwd_png, pp_export_pdf
 import datetime
 import torch
 torch.set_float32_matmul_precision("high")
@@ -285,203 +285,203 @@ def predict_main():
     返回:
         None
     """
-    # end_time = global_config['end_time']
-    # signature = BinanceAPI(APPID, SECRET)
-    # etadata = EtaReader(signature=signature,
-    #                     classifylisturl=global_config['classifylisturl'],
-    #                     classifyidlisturl=global_config['classifyidlisturl'],
-    #                     edbcodedataurl=global_config['edbcodedataurl'],
-    #                     edbcodelist=global_config['edbcodelist'],
-    #                     edbdatapushurl=global_config['edbdatapushurl'],
-    #                     edbdeleteurl=global_config['edbdeleteurl'],
-    #                     edbbusinessurl=global_config['edbbusinessurl'],
-    #                     classifyId=global_config['ClassifyId'],
-    #                     )
-    # # 获取数据
-    # if is_eta:
-    #     logger.info('从eta获取数据...')
+    end_time = global_config['end_time']
+    signature = BinanceAPI(APPID, SECRET)
+    etadata = EtaReader(signature=signature,
+                        classifylisturl=global_config['classifylisturl'],
+                        classifyidlisturl=global_config['classifyidlisturl'],
+                        edbcodedataurl=global_config['edbcodedataurl'],
+                        edbcodelist=global_config['edbcodelist'],
+                        edbdatapushurl=global_config['edbdatapushurl'],
+                        edbdeleteurl=global_config['edbdeleteurl'],
+                        edbbusinessurl=global_config['edbbusinessurl'],
+                        classifyId=global_config['ClassifyId'],
+                        )
+    # 获取数据
+    if is_eta:
+        logger.info('从eta获取数据...')

-    #     df_zhibiaoshuju, df_zhibiaoliebiao = etadata.get_eta_api_pp_data(
-    #         data_set=data_set, dataset=dataset)  # 原始数据,未处理
+        df_zhibiaoshuju, df_zhibiaoliebiao = etadata.get_eta_api_pp_data(
+            data_set=data_set, dataset=dataset)  # 原始数据,未处理

-    #     if is_market:
-    #         logger.info('从市场信息平台获取数据...')
-    #         try:
-    #             # 如果是测试环境,最高价最低价取excel文档
-    #             if server_host == '192.168.100.53':
-    #                 logger.info('从excel文档获取最高价最低价')
-    #                 df_zhibiaoshuju = get_high_low_data(df_zhibiaoshuju)
-    #             else:
-    #                 logger.info('从市场信息平台获取数据')
-    #                 df_zhibiaoshuju = get_market_data(
-    #                     end_time, df_zhibiaoshuju)
+        if is_market:
+            logger.info('从市场信息平台获取数据...')
+            try:
+                # 如果是测试环境,最高价最低价取excel文档
+                if server_host == '192.168.100.53':
+                    logger.info('从excel文档获取最高价最低价')
+                    df_zhibiaoshuju = get_high_low_data(df_zhibiaoshuju)
+                else:
+                    logger.info('从市场信息平台获取数据')
+                    df_zhibiaoshuju = get_market_data(
+                        end_time, df_zhibiaoshuju)

-    #         except:
-    #             logger.info('最高最低价拼接失败')
+            except:
+                logger.info('最高最低价拼接失败')

-    #     # 保存到xlsx文件的sheet表
-    #     with pd.ExcelWriter(os.path.join(dataset, data_set)) as file:
-    #         df_zhibiaoshuju.to_excel(file, sheet_name='指标数据', index=False)
-    #         df_zhibiaoliebiao.to_excel(file, sheet_name='指标列表', index=False)
+        # 保存到xlsx文件的sheet表
+        with pd.ExcelWriter(os.path.join(dataset, data_set)) as file:
+            df_zhibiaoshuju.to_excel(file, sheet_name='指标数据', index=False)
+            df_zhibiaoliebiao.to_excel(file, sheet_name='指标列表', index=False)

-    #     # 数据处理
-    #     df = datachuli_juxiting(df_zhibiaoshuju, df_zhibiaoliebiao, y=global_config['y'], dataset=dataset, add_kdj=add_kdj, is_timefurture=is_timefurture,
-    #                             end_time=end_time)
+        # 数据处理
+        df = datachuli_juxiting(df_zhibiaoshuju, df_zhibiaoliebiao, y=global_config['y'], dataset=dataset, add_kdj=add_kdj, is_timefurture=is_timefurture,
+                                end_time=end_time)

-    # else:
-    #     # 读取数据
-    #     logger.info('读取本地数据:' + os.path.join(dataset, data_set))
-    #     df, df_zhibiaoliebiao = getdata_zhoudu_juxiting(filename=os.path.join(dataset, data_set), y=y, dataset=dataset, add_kdj=add_kdj,
-    #                                                     is_timefurture=is_timefurture, end_time=end_time)  # 原始数据,未处理
+    else:
+        # 读取数据
+        logger.info('读取本地数据:' + os.path.join(dataset, data_set))
+        df, df_zhibiaoliebiao = getdata_zhoudu_juxiting(filename=os.path.join(dataset, data_set), y=y, dataset=dataset, add_kdj=add_kdj,
+                                                        is_timefurture=is_timefurture, end_time=end_time)  # 原始数据,未处理

-    # # 更改预测列名称
-    # df.rename(columns={y: 'y'}, inplace=True)
+    # 更改预测列名称
+    df.rename(columns={y: 'y'}, inplace=True)

-    # if is_edbnamelist:
-    #     df = df[edbnamelist]
-    # df.to_csv(os.path.join(dataset, '指标数据.csv'), index=False)
-    # # 保存最新日期的y值到数据库
-    # # 取第一行数据存储到数据库中
-    # first_row = df[['ds', 'y']].tail(1)
-    # # 判断y的类型是否为float
-    # if not isinstance(first_row['y'].values[0], float):
-    #     logger.info(f'{end_time}预测目标数据为空,跳过')
-    #     return None
+    if is_edbnamelist:
+        df = df[edbnamelist]
+    df.to_csv(os.path.join(dataset, '指标数据.csv'), index=False)
+    # 保存最新日期的y值到数据库
+    # 取第一行数据存储到数据库中
+    first_row = df[['ds', 'y']].tail(1)
+    # 判断y的类型是否为float
+    if not isinstance(first_row['y'].values[0], float):
+        logger.info(f'{end_time}预测目标数据为空,跳过')
+        return None

-    # # 将最新真实值保存到数据库
-    # if not sqlitedb.check_table_exists('trueandpredict'):
-    #     first_row.to_sql('trueandpredict', sqlitedb.connection, index=False)
-    # else:
-    #     for row in first_row.itertuples(index=False):
-    #         row_dict = row._asdict()
-    #         config.logger.info(f'要保存的真实值:{row_dict}')
-    #         # 判断ds是否为字符串类型,如果不是则转换为字符串类型
-    #         if isinstance(row_dict['ds'], (pd.Timestamp, datetime.datetime)):
-    #             row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d')
-    #         elif not isinstance(row_dict['ds'], str):
-    #             try:
-    #                 row_dict['ds'] = pd.to_datetime(
-    #                     row_dict['ds']).strftime('%Y-%m-%d')
-    #             except:
-    #                 logger.warning(f"无法解析的时间格式: {row_dict['ds']}")
-    #         # row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d')
-    #         # row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d %H:%M:%S')
-    #         check_query = sqlitedb.select_data(
-    #             'trueandpredict', where_condition=f"ds = '{row.ds}'")
-    #         if len(check_query) > 0:
-    #             set_clause = ", ".join(
-    #                 [f"{key} = '{value}'" for key, value in row_dict.items()])
-    #             sqlitedb.update_data(
-    #                 'trueandpredict', set_clause, where_condition=f"ds = '{row.ds}'")
-    #             continue
-    #         sqlitedb.insert_data('trueandpredict', tuple(
-    #             row_dict.values()), columns=row_dict.keys())
+    # 将最新真实值保存到数据库
+    if not sqlitedb.check_table_exists('trueandpredict'):
+        first_row.to_sql('trueandpredict', sqlitedb.connection, index=False)
+    else:
+        for row in first_row.itertuples(index=False):
+            row_dict = row._asdict()
+            config.logger.info(f'要保存的真实值:{row_dict}')
+            # 判断ds是否为字符串类型,如果不是则转换为字符串类型
+            if isinstance(row_dict['ds'], (pd.Timestamp, datetime.datetime)):
+                row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d')
+            elif not isinstance(row_dict['ds'], str):
+                try:
+                    row_dict['ds'] = pd.to_datetime(
+                        row_dict['ds']).strftime('%Y-%m-%d')
+                except:
+                    logger.warning(f"无法解析的时间格式: {row_dict['ds']}")
+            # row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d')
+            # row_dict['ds'] = row_dict['ds'].strftime('%Y-%m-%d %H:%M:%S')
+            check_query = sqlitedb.select_data(
+                'trueandpredict', where_condition=f"ds = '{row.ds}'")
+            if len(check_query) > 0:
+                set_clause = ", ".join(
+                    [f"{key} = '{value}'" for key, value in row_dict.items()])
+                sqlitedb.update_data(
+                    'trueandpredict', set_clause, where_condition=f"ds = '{row.ds}'")
+                continue
+            sqlitedb.insert_data('trueandpredict', tuple(
+                row_dict.values()), columns=row_dict.keys())

-    # # 更新accuracy表的y值
-    # if not sqlitedb.check_table_exists('accuracy'):
-    #     pass
-    # else:
-    #     update_y = sqlitedb.select_data(
-    #         'accuracy', where_condition="y is null")
-    #     if len(update_y) > 0:
-    #         logger.info('更新accuracy表的y值')
-    #         # 找到update_y 中ds且df中的y的行
-    #         update_y = update_y[update_y['ds'] <= end_time]
-    #         logger.info(f'要更新y的信息:{update_y}')
-    #         # try:
-    #         for row in update_y.itertuples(index=False):
-    #             try:
-    #                 row_dict = row._asdict()
-    #                 yy = df[df['ds'] == row_dict['ds']]['y'].values[0]
-    #                 LOW = df[df['ds'] == row_dict['ds']]['Brentzdj'].values[0]
-    #                 HIGH = df[df['ds'] == row_dict['ds']]['Brentzgj'].values[0]
-    #                 sqlitedb.update_data(
-    #                     'accuracy', f"y = {yy},LOW_PRICE = {LOW},HIGH_PRICE = {HIGH}", where_condition=f"ds = '{row_dict['ds']}'")
-    #             except:
-    #                 logger.info(f'更新accuracy表的y值失败:{row_dict}')
-    #         # except Exception as e:
-    #         #     logger.info(f'更新accuracy表的y值失败:{e}')
+    # 更新accuracy表的y值
+    if not sqlitedb.check_table_exists('accuracy'):
+        pass
+    else:
+        update_y = sqlitedb.select_data(
+            'accuracy', where_condition="y is null")
+        if len(update_y) > 0:
+            logger.info('更新accuracy表的y值')
+            # 找到update_y 中ds且df中的y的行
+            update_y = update_y[update_y['ds'] <= end_time]
+            logger.info(f'要更新y的信息:{update_y}')
+            # try:
+            for row in update_y.itertuples(index=False):
+                try:
+                    row_dict = row._asdict()
+                    yy = df[df['ds'] == row_dict['ds']]['y'].values[0]
+                    LOW = df[df['ds'] == row_dict['ds']]['Brentzdj'].values[0]
+                    HIGH = df[df['ds'] == row_dict['ds']]['Brentzgj'].values[0]
+                    sqlitedb.update_data(
+                        'accuracy', f"y = {yy},LOW_PRICE = {LOW},HIGH_PRICE = {HIGH}", where_condition=f"ds = '{row_dict['ds']}'")
+                except:
+                    logger.info(f'更新accuracy表的y值失败:{row_dict}')
+            # except Exception as e:
+            #     logger.info(f'更新accuracy表的y值失败:{e}')

-    # # 判断当前日期是不是周一
-    # is_weekday = datetime.datetime.now().weekday() == 0
-    # if is_weekday:
-    #     logger.info('今天是周一,更新预测模型')
-    #     # 计算最近60天预测残差最低的模型名称
-    #     model_results = sqlitedb.select_data(
-    #         'trueandpredict', order_by="ds DESC", limit="60")
-    #     # 删除空值率为90%以上的列
-    #     if len(model_results) > 10:
-    #         model_results = model_results.dropna(
-    #             thresh=len(model_results)*0.1, axis=1)
-    #     # 删除空行
-    #     model_results = model_results.dropna()
-    #     modelnames = model_results.columns.to_list()[2:-1]
-    #     for col in model_results[modelnames].select_dtypes(include=['object']).columns:
-    #         model_results[col] = model_results[col].astype(np.float32)
-    #     # 计算每个预测值与真实值之间的偏差率
-    #     for model in modelnames:
-    #         model_results[f'{model}_abs_error_rate'] = abs(
-    #             model_results['y'] - model_results[model]) / model_results['y']
-    #     # 获取每行对应的最小偏差率值
-    #     min_abs_error_rate_values = model_results.apply(
-    #         lambda row: row[[f'{model}_abs_error_rate' for model in modelnames]].min(), axis=1)
-    #     # 获取每行对应的最小偏差率值对应的列名
-    #     min_abs_error_rate_column_name = model_results.apply(
-    #         lambda row: row[[f'{model}_abs_error_rate' for model in modelnames]].idxmin(), axis=1)
-    #     # 将列名索引转换为列名
-    #     min_abs_error_rate_column_name = min_abs_error_rate_column_name.map(
-    #         lambda x: x.split('_')[0])
-    #     # 取出现次数最多的模型名称
-    #     most_common_model = min_abs_error_rate_column_name.value_counts().idxmax()
-    #     logger.info(f"最近60天预测残差最低的模型名称:{most_common_model}")
-    #     # 保存结果到数据库
-    #     if not sqlitedb.check_table_exists('most_model'):
-    #         sqlitedb.create_table(
-    #             'most_model', columns="ds datetime, most_common_model TEXT")
-    #     sqlitedb.insert_data('most_model', (datetime.datetime.now().strftime(
-    #         '%Y-%m-%d %H:%M:%S'), most_common_model,), columns=('ds', 'most_common_model',))
+    # 判断当前日期是不是周一
+    is_weekday = datetime.datetime.now().weekday() == 0
+    if is_weekday:
+        logger.info('今天是周一,更新预测模型')
+        # 计算最近60天预测残差最低的模型名称
+        model_results = sqlitedb.select_data(
+            'trueandpredict', order_by="ds DESC", limit="60")
+        # 删除空值率为90%以上的列
+        if len(model_results) > 10:
+            model_results = model_results.dropna(
+                thresh=len(model_results)*0.1, axis=1)
+        # 删除空行
+        model_results = model_results.dropna()
+        modelnames = model_results.columns.to_list()[2:-1]
+        for col in model_results[modelnames].select_dtypes(include=['object']).columns:
+            model_results[col] = model_results[col].astype(np.float32)
+        # 计算每个预测值与真实值之间的偏差率
+        for model in modelnames:
+            model_results[f'{model}_abs_error_rate'] = abs(
+                model_results['y'] - model_results[model]) / model_results['y']
+        # 获取每行对应的最小偏差率值
+        min_abs_error_rate_values = model_results.apply(
+            lambda row: row[[f'{model}_abs_error_rate' for model in modelnames]].min(), axis=1)
+        # 获取每行对应的最小偏差率值对应的列名
+        min_abs_error_rate_column_name = model_results.apply(
+            lambda row: row[[f'{model}_abs_error_rate' for model in modelnames]].idxmin(), axis=1)
+        # 将列名索引转换为列名
+        min_abs_error_rate_column_name = min_abs_error_rate_column_name.map(
+            lambda x: x.split('_')[0])
+        # 取出现次数最多的模型名称
+        most_common_model = min_abs_error_rate_column_name.value_counts().idxmax()
+        logger.info(f"最近60天预测残差最低的模型名称:{most_common_model}")
+        # 保存结果到数据库
+        if not sqlitedb.check_table_exists('most_model'):
+            sqlitedb.create_table(
+                'most_model', columns="ds datetime, most_common_model TEXT")
+        sqlitedb.insert_data('most_model', (datetime.datetime.now().strftime(
+            '%Y-%m-%d %H:%M:%S'), most_common_model,), columns=('ds', 'most_common_model',))

-    # if is_corr:
-    #     df = corr_feature(df=df)
+    if is_corr:
+        df = corr_feature(df=df)

-    # df1 = df.copy()  # 备份一下,后面特征筛选完之后加入ds y 列用
-    # logger.info(f"开始训练模型...")
-    # row, col = df.shape
+    df1 = df.copy()  # 备份一下,后面特征筛选完之后加入ds y 列用
+    logger.info(f"开始训练模型...")
+    row, col = df.shape

-    # now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
-    # ex_Model(df,
-    #          horizon=global_config['horizon'],
-    #          input_size=global_config['input_size'],
-    #          train_steps=global_config['train_steps'],
-    #          val_check_steps=global_config['val_check_steps'],
-    #          early_stop_patience_steps=global_config['early_stop_patience_steps'],
-    #          is_debug=global_config['is_debug'],
-    #          dataset=global_config['dataset'],
-    #          is_train=global_config['is_train'],
-    #          is_fivemodels=global_config['is_fivemodels'],
-    #          val_size=global_config['val_size'],
-    #          test_size=global_config['test_size'],
-    #          settings=global_config['settings'],
-    #          now=now,
-    #          etadata=etadata,
-    #          modelsindex=global_config['modelsindex'],
-    #          data=data,
-    #          is_eta=global_config['is_eta'],
-    #          end_time=global_config['end_time'],
-    #          )
+    now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+    ex_Model_Juxiting(df,
+             horizon=global_config['horizon'],
+             input_size=global_config['input_size'],
+             train_steps=global_config['train_steps'],
+             val_check_steps=global_config['val_check_steps'],
+             early_stop_patience_steps=global_config['early_stop_patience_steps'],
+             is_debug=global_config['is_debug'],
+             dataset=global_config['dataset'],
+             is_train=global_config['is_train'],
+             is_fivemodels=global_config['is_fivemodels'],
+             val_size=global_config['val_size'],
+             test_size=global_config['test_size'],
+             settings=global_config['settings'],
+             now=now,
+             etadata=etadata,
+             modelsindex=global_config['modelsindex'],
+             data=data,
+             is_eta=global_config['is_eta'],
+             end_time=global_config['end_time'],
+             )

-    # logger.info('模型训练完成')
+    logger.info('模型训练完成')

-    # logger.info('训练数据绘图ing')
-    # model_results3 = model_losss_juxiting(
-    #     sqlitedb, end_time=global_config['end_time'], is_fivemodels=global_config['is_fivemodels'])
-    # logger.info('训练数据绘图end')
+    logger.info('训练数据绘图ing')
+    model_results3 = model_losss_juxiting(
+        sqlitedb, end_time=global_config['end_time'], is_fivemodels=global_config['is_fivemodels'])
+    logger.info('训练数据绘图end')

-    # push_market_value()
+    push_market_value()

-    # sql_inset_predict(global_config)
+    sql_inset_predict(global_config)

-    # # 模型报告
+    # 模型报告
     # logger.info('制作报告ing')
     # title = f'{settings}--{end_time}-预测报告'  # 报告标题
     # reportname = f'聚烯烃PP大模型月度预测--{end_time}.pdf'  # 报告文件名
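
Note on the Monday branch re-enabled in the hunk above: it ranks models by per-row absolute error rate against y over the last 60 stored predictions and keeps the model name that wins the most rows. A minimal self-contained sketch of that selection logic, with toy numbers (the model names NHITS/TFT are hypothetical, and idxmin over the rate columns is equivalent to the row-wise apply used in the commit):

import pandas as pd

# Toy stand-in for the last-60-rows 'trueandpredict' query; model names are hypothetical.
model_results = pd.DataFrame({
    'y':     [100.0, 102.0, 98.0],
    'NHITS': [101.0, 105.0, 97.0],
    'TFT':   [99.5, 101.0, 90.0],
})
modelnames = ['NHITS', 'TFT']

# Per-model absolute error rate per row, as in the commit.
for model in modelnames:
    model_results[f'{model}_abs_error_rate'] = abs(
        model_results['y'] - model_results[model]) / model_results['y']

rate_cols = [f'{model}_abs_error_rate' for model in modelnames]
# Column name of each row's minimum; same result as the apply(...idxmin) in the diff.
winner_per_row = model_results[rate_cols].idxmin(axis=1)
# Strip the suffix back to a model name; the commit's split('_')[0] does the same
# only for names without underscores (removesuffix needs Python 3.9+).
winner_per_row = winner_per_row.map(lambda c: c.removesuffix('_abs_error_rate'))
most_common_model = winner_per_row.value_counts().idxmax()
print(most_common_model)  # 'TFT' — lowest error rate on 2 of 3 rows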
@@ -530,7 +530,7 @@ if __name__ == '__main__':
     #         logger.info(f'预测失败:{e}')
     #         continue

-    global_config['end_time'] = '2025-08-01'
+    global_config['end_time'] = '2025-08-04'
     predict_main()
     # push_market_value()

@@ -513,8 +513,7 @@ if __name__ == '__main__':
     #         continue


-    global_config['end_time'] = '2025-08-01'
-    global_config['db_mysql'].connect()
+    global_config['end_time'] = '2025-08-04'
     predict_main()


@@ -457,12 +457,14 @@ def ex_Model_Juxiting(df, horizon, input_size, train_steps, val_check_steps, ear
     # 处理非有限值(NA 或 inf),将其替换为 0
     df_predict = df_predict.fillna(0)
     df_predict = df_predict.replace([np.inf, -np.inf], 0)
-    df_predict.astype(
-        {col: 'int' for col in df_predict.columns if col not in ['ds']})

     # 添加预测时间
     df_predict['created_dt'] = end_time

+    # 预测结果保留整数(先四舍五入再转换为整数类型)
+    df_predict = df_predict.round().astype({col: 'int' for col in df_predict.columns if col not in ['ds', 'created_dt']})
+
+
     # 保存预测值
     df_predict.to_csv(os.path.join(config.dataset, "predict.csv"), index=False)

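
Note on the final hunk: the removed df_predict.astype(...) call was a no-op, because DataFrame.astype returns a new frame and the result was never assigned, so the CSV kept raw floats; a bare int cast would also truncate 7234.6 down to 7234 rather than rounding. A standalone sketch of the difference (the NHITS prediction column is hypothetical):

import pandas as pd

df_predict = pd.DataFrame({
    'ds': pd.to_datetime(['2025-08-04']),
    'NHITS': [7234.6],                     # hypothetical prediction column
    'created_dt': ['2025-08-04'],
})
int_cols = {c: 'int' for c in df_predict.columns if c not in ['ds', 'created_dt']}

df_predict.astype(int_cols)                # old code: result discarded, frame unchanged
print(df_predict['NHITS'].dtype)           # float64

df_predict = df_predict.round().astype(int_cols)  # new code: round, cast, reassign
print(df_predict['NHITS'].iloc[0])         # 7235 (a bare cast would give 7234)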