LPG price forecast debugging (液化气预测调试)

workpc 2025-07-08 15:54:05 +08:00
parent d529d0cee6
commit 0ca7553951
3 changed files with 189 additions and 174 deletions


@@ -1,7 +1,34 @@
from statsmodels.tools.eval_measures import mse, rmse
from pandas import Series, DataFrame
import cufflinks as cf
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import pickle
import warnings
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_absolute_error
from xgboost import plot_importance, plot_tree
import xgboost as xgb
import plotly.graph_objects as go
import plotly.express as px
import statsmodels.api as sm
from xgboost import XGBRegressor
from sklearn.linear_model import Lasso
import sklearn.datasets as datasets
from sklearn import preprocessing
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly import __version__
import time
import random
import seaborn as sn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import json
from datetime import datetime,timedelta
from datetime import datetime, timedelta
# 变量定义
login_url = "http://10.200.32.39/jingbo-api/api/server/login"
@@ -38,60 +65,24 @@ read_file_path_name = "液化气数据.xlsx"
one_cols = []
two_cols = []
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sn
import random
import time
from plotly import __version__
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
# 导入机器学习算法模型
from sklearn import preprocessing
from pandas import Series,DataFrame
import matplotlib.pyplot as plt
import sklearn.datasets as datasets
#导入机器学习算法模型
from sklearn.linear_model import Lasso
from xgboost import XGBRegressor
import statsmodels.api as sm
try:
from keras.preprocessing.sequence import TimeseriesGenerator
except:
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
import plotly.express as px
import plotly.graph_objects as go
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.metrics import mean_absolute_error
from statsmodels.tools.eval_measures import mse,rmse
from sklearn.model_selection import GridSearchCV
from xgboost import XGBRegressor
import warnings
import pickle
# 切割训练数据和样本数据
from sklearn.metrics import mean_squared_error
#切割训练数据和样本数据
from sklearn.model_selection import train_test_split
#用于模型评分
from sklearn.metrics import r2_score
# 用于模型评分
le = preprocessing.LabelEncoder()
# print(__version__) # requires version >= 1.9.0
import cufflinks as cf
cf.go_offline()
random.seed(100)
@@ -104,14 +95,14 @@ def get_head_auth():
text = json.loads(login_res.text)
if text["status"]:
token = text["data"]["accessToken"]
print('获取的token:',token)
print('获取的token:', token)
return token
else:
print("获取认证失败")
return None
def get_data_value(token, dataItemNoList,date):
def get_data_value(token, dataItemNoList, date):
search_data = {
"data": {
"date": date,
@@ -120,23 +111,23 @@ def get_data_value(token, dataItemNoList,date):
"funcModule": "数据项",
"funcOperation": "查询"
}
headers = {"Authorization": token}
search_res = requests.post(url=search_url, headers=headers, json=search_data, timeout=(3, 5))
search_res = requests.post(
url=search_url, headers=headers, json=search_data, timeout=(3, 5))
print('数据项查询参数search_data')
print(search_data)
print('数据项查询结果search_res')
print(search_res.text)
try:
search_value = json.loads(search_res.text)["data"]
print("数据项查询结果:", search_value)
except json.JSONDecodeError as e:
print(f"Error decoding JSON: {e}")
print("Response content:", search_res.text)
return None
return None
if search_value:
return search_value
else:
@@ -145,7 +136,8 @@ def get_data_value(token, dataItemNoList,date):
def get_head_push_auth():
login_res = requests.post(url=login_push_url, json=login_push_data, timeout=(3, 5))
login_res = requests.post(
url=login_push_url, json=login_push_data, timeout=(3, 5))
text = json.loads(login_res.text)
if text["status"]:
token = text["data"]["accessToken"]
@@ -155,8 +147,7 @@ def get_head_push_auth():
return None
def upload_data_to_system(token_push,date):
def upload_data_to_system(token_push, date):
data = {
"funcModule": "数据表信息列表",
"funcOperation": "新增",
@@ -170,94 +161,96 @@ def upload_data_to_system(token_push,date):
]
}
headers = {"Authorization": token_push}
res = requests.post(url=upload_url, headers=headers, json=data, timeout=(3, 5))
res = requests.post(url=upload_url, headers=headers,
json=data, timeout=(3, 5))
print(res.text)
print('预测值:',data['data'][0]['dataValue'])
print('预测值:', data['data'][0]['dataValue'])
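For orientation, the request that upload_data_to_system issues follows the outline below. Only funcModule, funcOperation and the dataValue field are visible in this hunk; the item-number and date keys in the sketch are hypothetical placeholders, not the author's actual payload.
import requests

def push_prediction_sketch(token_push, upload_url, date_str, predicted_price):
    """Minimal sketch of the push call; dataItemNo/dataDate are illustrative placeholders."""
    payload = {
        "funcModule": "数据表信息列表",
        "funcOperation": "新增",
        "data": [{
            "dataItemNo": "HYPOTHETICAL_ITEM_NO",   # placeholder, not the real item number
            "dataDate": date_str,                   # placeholder field name
            "dataValue": predicted_price,
        }],
    }
    headers = {"Authorization": token_push}
    res = requests.post(url=upload_url, headers=headers, json=payload, timeout=(3, 5))
    return res.text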
price_list = []
def forecast_price():
# df_test = pd.read_csv('定价模型数据收集0212.csv')
df_test = pd.read_excel('液化气数据.xlsx')
df_test.drop([0],inplace=True)
df_test.drop([0], inplace=True)
try:
df_test['Date']=pd.to_datetime(df_test['Date'], format='%m/%d/%Y',infer_datetime_format=True)
df_test['Date'] = pd.to_datetime(
df_test['Date'], format='%m/%d/%Y', infer_datetime_format=True)
except:
df_test['Date']=pd.to_datetime(df_test['Date'], format=r'%Y-%m-%d',infer_datetime_format=True)
df_test['Date'] = pd.to_datetime(
df_test['Date'], format=r'%Y-%m-%d', infer_datetime_format=True)
df_test_1 = df_test
df_test_1=df_test_1.fillna(df_test.ffill())
df_test_1=df_test_1.fillna(df_test_1.bfill())
df_test_1 = df_test_1.fillna(df_test.ffill())
df_test_1 = df_test_1.fillna(df_test_1.bfill())
# 选择用于模型训练的列名称
col_for_training = df_test_1.columns
import joblib
Best_model_DalyLGPrice = joblib.load("日度价格预测_液化气最佳模型.pkl")
# 最新的一天为最后一行的数据
df_test_1_Day = df_test_1.tail(1)
# 移除不需要的列
df_test_1_Day.index = df_test_1_Day["Date"]
df_test_1_Day = df_test_1_Day.drop(["Date"], axis= 1)
df_test_1_Day=df_test_1_Day.drop('Price',axis=1)
df_test_1_Day=df_test_1_Day.dropna()
df_test_1_Day = df_test_1_Day.drop(["Date"], axis=1)
df_test_1_Day = df_test_1_Day.drop('Price', axis=1)
df_test_1_Day = df_test_1_Day.dropna()
for col in df_test_1_Day.columns:
df_test_1_Day[col] = pd.to_numeric(df_test_1_Day[col],errors='coerce')
#预测今日价格,显示至小数点后两位
Ypredict_Today=Best_model_DalyLGPrice.predict(df_test_1_Day)
df_test_1_Day[col] = pd.to_numeric(df_test_1_Day[col], errors='coerce')
# 预测今日价格,显示至小数点后两位
Ypredict_Today = Best_model_DalyLGPrice.predict(df_test_1_Day)
df_test_1_Day['日度预测价格']=Ypredict_Today
df_test_1_Day['日度预测价格'] = Ypredict_Today
print(df_test_1_Day['日度预测价格'])
a = df_test_1_Day['日度预测价格']
a = a[0]
a = float(a)
a = round(a,2)
a = round(a, 2)
price_list.append(a)
return a
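One fragile spot in forecast_price: `a = a[0]` relies on positional fallback of Series indexing, which recent pandas releases warn about when the index is a date. A tiny equivalent extraction using `.iloc`, shown on a stand-in frame rather than the real data:
import pandas as pd

# Stand-in for df_test_1_Day after prediction: one row, Date as the index
df_demo = pd.DataFrame({'日度预测价格': [4321.456]},
                       index=pd.to_datetime(['2025-07-08']))

# .iloc[0] is positional by contract, so it keeps working on a date-indexed Series
predicted = round(float(df_demo['日度预测价格'].iloc[0]), 2)
print(predicted)  # 4321.46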
def optimize_Model():
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import mean_squared_error, r2_score
import pandas as pd
pd.set_option('display.max_rows',40)
pd.set_option('display.max_columns',40)
pd.set_option('display.max_rows', 40)
pd.set_option('display.max_columns', 40)
df_test = pd.read_excel('液化气数据.xlsx')
df_test.drop([0],inplace=True)
df_test.drop([0], inplace=True)
try:
df_test['Date']=pd.to_datetime(df_test['Date'], format='%m/%d/%Y',infer_datetime_format=True)
df_test['Date'] = pd.to_datetime(
df_test['Date'], format='%m/%d/%Y', infer_datetime_format=True)
except:
df_test['Date']=pd.to_datetime(df_test['Date'], format=r'%Y-%m-%d',infer_datetime_format=True)
df_test['Date'] = pd.to_datetime(
df_test['Date'], format=r'%Y-%m-%d', infer_datetime_format=True)
#将缺失值补为前一个或者后一个数值
# 将缺失值补为前一个或者后一个数值
df_test_1 = df_test
df_test_1=df_test_1.fillna(df_test.ffill())
df_test_1=df_test_1.fillna(df_test_1.bfill())
df_test_1 = df_test_1.fillna(df_test.ffill())
df_test_1 = df_test_1.fillna(df_test_1.bfill())
df_test_1["Date"] = pd.to_datetime(df_test_1["Date"])
df_test_1.index = df_test_1["Date"]
df_test_1 = df_test_1.drop(["Date"], axis= 1)
df_test_1 = df_test_1.drop(["Date"], axis=1)
df_test_1 = df_test_1.astype('float')
import numpy as np
import pandas as pd
from pandas import Series,DataFrame
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
import sklearn.datasets as datasets
#导入机器学习算法模型
# 导入机器学习算法模型
from sklearn.linear_model import Lasso
from xgboost import XGBRegressor
@@ -273,7 +266,7 @@ def optimize_Model():
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.metrics import mean_absolute_error
from statsmodels.tools.eval_measures import mse,rmse
from statsmodels.tools.eval_measures import mse, rmse
from sklearn.model_selection import GridSearchCV
from xgboost import XGBRegressor
import warnings
@@ -281,63 +274,67 @@ def optimize_Model():
from sklearn.metrics import mean_squared_error
#切割训练数据和样本数据
# 切割训练数据和样本数据
from sklearn.model_selection import train_test_split
#用于模型评分
# 用于模型评分
from sklearn.metrics import r2_score
dataset1=df_test_1.drop('Price',axis=1)#.astype(float)
dataset1 = df_test_1.drop('Price', axis=1) # .astype(float)
y=df_test_1['Price']
y = df_test_1['Price']
x=dataset1
x = dataset1
train = x
target = y
#切割数据样本集合测试集
X_train,x_test,y_train,y_true = train_test_split(train,target,test_size=0.2,random_state=0)
# 切割数据样本集合测试集
X_train, x_test, y_train, y_true = train_test_split(
train, target, test_size=0.2, random_state=0)
#模型缩写
Lasso = Lasso(random_state=0)
# 模型缩写
Lasso = Lasso(random_state=0)
XGBR = XGBRegressor(random_state=0)
#训练模型
Lasso.fit(X_train,y_train)
XGBR.fit(X_train,y_train)
#模型拟合
# 训练模型
Lasso.fit(X_train, y_train)
XGBR.fit(X_train, y_train)
# 模型拟合
y_pre_Lasso = Lasso.predict(x_test)
y_pre_XGBR = XGBR.predict(x_test)
#计算Lasso、XGBR、RandomForestR、AdaBoostR、GradientBoostingR、BaggingRegressor各模型的R²
Lasso_score = r2_score(y_true,y_pre_Lasso)
XGBR_score=r2_score(y_true,y_pre_XGBR)
# 计算Lasso、XGBR、RandomForestR、AdaBoostR、GradientBoostingR、BaggingRegressor各模型的R²
Lasso_score = r2_score(y_true, y_pre_Lasso)
XGBR_score = r2_score(y_true, y_pre_XGBR)
#计算Lasso、XGBR的MSE和RMSE
Lasso_MSE=mean_squared_error(y_true, y_pre_Lasso)
XGBR_MSE=mean_squared_error(y_true, y_pre_XGBR)
# 计算Lasso、XGBR的MSE和RMSE
Lasso_MSE = mean_squared_error(y_true, y_pre_Lasso)
XGBR_MSE = mean_squared_error(y_true, y_pre_XGBR)
Lasso_RMSE=np.sqrt(Lasso_MSE)
XGBR_RMSE=np.sqrt(XGBR_MSE)
Lasso_RMSE = np.sqrt(Lasso_MSE)
XGBR_RMSE = np.sqrt(XGBR_MSE)
# 将不同模型的不同误差值整合成一个表格
model_results = pd.DataFrame([['Lasso', Lasso_RMSE, Lasso_score],
['XgBoost', XGBR_RMSE, XGBR_score]],
columns = ['模型(Model)','均方根误差(RMSE)', 'R^2 score'])
#将模型名称(Model)列设置为索引
model_results1=model_results.set_index('模型(Model)')
columns=['模型(Model)', '均方根误差(RMSE)', 'R^2 score'])
# 将模型名称(Model)列设置为索引
model_results1 = model_results.set_index('模型(Model)')
model_results1
#定义plot_feature_importance函数该函数用于计算特征重要性。此部分代码无需调整
def plot_feature_importance(importance,names,model_type):
# 定义plot_feature_importance函数该函数用于计算特征重要性。此部分代码无需调整
def plot_feature_importance(importance, names, model_type):
feature_importance = np.array(importance)
feature_names = np.array(names)
data={'feature_names':feature_names,'feature_importance':feature_importance}
data = {'feature_names': feature_names,
'feature_importance': feature_importance}
fi_df = pd.DataFrame(data)
fi_df.sort_values(by=['feature_importance'], ascending=False,inplace=True)
fi_df.sort_values(by=['feature_importance'],
ascending=False, inplace=True)
plt.figure(figsize=(10,8))
plt.figure(figsize=(10, 8))
sn.barplot(x=fi_df['feature_importance'], y=fi_df['feature_names'])
plt.title(model_type + " "+'FEATURE IMPORTANCE')
@@ -345,55 +342,57 @@ def optimize_Model():
plt.ylabel('FEATURE NAMES')
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['SimHei']
## Xgboost 模型参数优化-初步
#参考: https://juejin.im/post/6844903661013827598
#每次调参时备选参数数值以同数量级的1、3、10设置即可比如设置1、3、10或0.1、0.3、1.0或0.01,0.03,0.10即可)
# Xgboost 模型参数优化-初步
# 参考: https://juejin.im/post/6844903661013827598
# 每次调参时备选参数数值以同数量级的1、3、10设置即可比如设置1、3、10或0.1、0.3、1.0或0.01,0.03,0.10即可)
from xgboost import XGBRegressor
from sklearn.model_selection import GridSearchCV
estimator = XGBRegressor(random_state=0,
nthread=4,
seed=0
)
nthread=4,
seed=0
)
parameters = {
'max_depth': range (2, 11, 2), # 树的最大深度
'n_estimators': range (50, 101, 10), # 迭代次数
'max_depth': range(2, 11, 2), # 树的最大深度
'n_estimators': range(50, 101, 10), # 迭代次数
'learning_rate': [0.01, 0.03, 0.1, 0.3, 0.5, 1]
}
grid_search_XGB = GridSearchCV(
estimator=estimator,
param_grid=parameters,
# n_jobs = 10,
cv = 3,
# n_jobs = 10,
cv=3,
verbose=True
)
grid_search_XGB.fit(X_train, y_train)
#如果电脑在此步骤报错可能是因为计算量太大超过硬件可支持程度可注释掉“n_jobs=10”一行
# 如果电脑在此步骤报错可能是因为计算量太大超过硬件可支持程度可注释掉“n_jobs=10”一行
best_parameters = grid_search_XGB.best_estimator_.get_params()
y_pred = grid_search_XGB.predict(x_test)
op_XGBR_score = r2_score(y_true,y_pred)
op_XGBR_MSE= mean_squared_error(y_true, y_pred)
op_XGBR_RMSE= np.sqrt(op_XGBR_MSE)
op_XGBR_score = r2_score(y_true, y_pred)
op_XGBR_MSE = mean_squared_error(y_true, y_pred)
op_XGBR_RMSE = np.sqrt(op_XGBR_MSE)
model_results2 = pd.DataFrame([['Optimized_Xgboost', op_XGBR_RMSE, op_XGBR_score]],
columns = ['模型(Model)', '均方根误差(RMSE)', 'R^2 score'])
model_results2=model_results2.set_index('模型(Model)')
columns=['模型(Model)', '均方根误差(RMSE)', 'R^2 score'])
model_results2 = model_results2.set_index('模型(Model)')
try:
results = model_results1.append(model_results2, ignore_index = False)
results = model_results1.append(model_results2, ignore_index=False)
except:
results = pd.concat([model_results1,model_results2],ignore_index=True)
results = pd.concat(
[model_results1, model_results2], ignore_index=True)
import pickle
Pkl_Filename = "日度价格预测_液化气最佳模型.pkl"
Pkl_Filename = "日度价格预测_液化气最佳模型.pkl"
with open(Pkl_Filename, 'wb') as file:
pickle.dump(grid_search_XGB, file)
with open(Pkl_Filename, 'wb') as file:
pickle.dump(grid_search_XGB, file)
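optimize_Model persists the fitted GridSearchCV with pickle, and forecast_price later reads it back via joblib.load, which also goes through the pickle protocol. A self-contained round-trip sketch on toy data, just to show that the reloaded object predicts directly:
import pickle
import numpy as np
from sklearn.model_selection import GridSearchCV
from xgboost import XGBRegressor

# Toy data standing in for the LPG feature matrix
rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
y = X @ rng.normal(size=5) + rng.normal(scale=0.1, size=200)

search = GridSearchCV(XGBRegressor(random_state=0),
                      param_grid={'max_depth': [2, 4], 'n_estimators': [50, 100]},
                      cv=3)
search.fit(X, y)

with open('demo_model.pkl', 'wb') as f:   # same pattern as the pickle.dump above
    pickle.dump(search, f)

with open('demo_model.pkl', 'rb') as f:   # the reloaded GridSearchCV predicts with its best estimator
    reloaded = pickle.load(f)
print(reloaded.predict(X[:1]))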
def read_xls_data():
"""获取特征项ID"""
@@ -411,10 +410,10 @@ def start(date=''):
token = get_head_auth()
if not token:
return
cur_time,cur_time2 = getNow(date)
cur_time, cur_time2 = getNow(date)
print(f"获取{cur_time}数据")
datas = get_data_value(token, one_cols,date=cur_time)
datas = get_data_value(token, one_cols, date=cur_time)
if not datas:
return
@@ -425,14 +424,15 @@ def start(date=''):
print(data_value)
dataItemNo_dataValue[data_value["dataItemNo"]] = ""
else:
dataItemNo_dataValue[data_value["dataItemNo"]] = data_value["dataValue"]
dataItemNo_dataValue[data_value["dataItemNo"]
] = data_value["dataValue"]
for value in one_cols:
if value in dataItemNo_dataValue:
append_rows.append(dataItemNo_dataValue[value])
else:
append_rows.append("")
print('添加的行:',append_rows)
print('添加的行:', append_rows)
save_xls_2(append_rows)
@@ -448,7 +448,7 @@ def getNow(date='', offset=0):
tuple: (紧凑日期字符串, 标准日期字符串)
"""
# 日期解析逻辑
from datetime import datetime,timedelta
from datetime import datetime, timedelta
if isinstance(date, datetime):
now = date
else:
@@ -466,22 +466,23 @@ def getNow(date='', offset=0):
# 应用日期偏移
now = now - timedelta(days=offset)
# 统一格式化输出
date_str = now.strftime("%Y-%m-%d")
compact_date = date_str.replace("-", "")
return compact_date, date_str
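The visible tail of getNow fixes its contract: the requested day, minus offset days, rendered both compactly and dashed. A standalone stand-in with the same outputs, assuming the elided branches only handle string parsing of the date argument:
from datetime import datetime, timedelta

def get_now_demo(date=None, offset=0):
    """Stand-in for getNow with the same return shape; simplified parsing."""
    now = date if isinstance(date, datetime) else datetime.now()
    now = now - timedelta(days=offset)
    date_str = now.strftime("%Y-%m-%d")
    return date_str.replace("-", ""), date_str

print(get_now_demo(datetime(2025, 7, 8)))             # ('20250708', '2025-07-08')
print(get_now_demo(datetime(2025, 7, 8), offset=1))   # ('20250707', '2025-07-07')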
def start_1(date=''):
"""补充昨日数据"""
read_xls_data()
token = get_head_auth()
if not token:
return
cur_time,cur_time2 = getNow(date,offset=1)
cur_time, cur_time2 = getNow(date, offset=1)
print(f"补充{cur_time}数据")
datas = get_data_value(token, one_cols,date=cur_time)
datas = get_data_value(token, one_cols, date=cur_time)
if not datas:
print(f"{cur_time}没有数据")
return
@@ -493,14 +494,15 @@ def start_1(date=''):
print(data_value)
dataItemNo_dataValue[data_value["dataItemNo"]] = ""
else:
dataItemNo_dataValue[data_value["dataItemNo"]] = data_value["dataValue"]
dataItemNo_dataValue[data_value["dataItemNo"]
] = data_value["dataValue"]
for value in one_cols:
if value in dataItemNo_dataValue:
append_rows.append(dataItemNo_dataValue[value])
else:
append_rows.append("")
print('添加的行:',append_rows)
print('添加的行:', append_rows)
save_xls_2(append_rows)
@@ -513,10 +515,10 @@ def save_xls_2(append_rows):
# 读取现有数据(假设第一行为列名)
df = pd.read_excel('液化气数据.xlsx', sheet_name=0)
# 转换append_rows为DataFrame
append_rows = pd.DataFrame([append_rows],columns=df.columns)
append_rows = pd.DataFrame([append_rows], columns=df.columns)
# 创建新数据行
new_date = append_rows['Date'].values[0]
dates = df['Date'].to_list()
# 判断日期是否存在
if new_date in dates:
@@ -531,16 +533,18 @@ def save_xls_2(append_rows):
print(df.head())
print(df.tail())
print(f"插入 {new_date} 新数据")
# 保存更新后的数据
df.to_excel('液化气数据.xlsx', index=False, engine='openpyxl')
except FileNotFoundError:
# 如果文件不存在则创建新文件
pd.DataFrame([append_rows]).to_excel('液化气数据.xlsx', index=False, engine='openpyxl')
pd.DataFrame([append_rows]).to_excel(
'液化气数据.xlsx', index=False, engine='openpyxl')
except Exception as e:
print(f"保存数据时发生错误: {str(e)}")
def check_data(dataItemNo):
token = get_head_auth()
if not token:
@@ -550,6 +554,7 @@ def check_data(dataItemNo):
if not datas:
return
def get_queryDataListItemNos_value(token, url, dataItemNoList, dateStart, dateEnd):
search_data = {
@@ -563,14 +568,16 @@ def get_queryDataListItemNos_value(token, url, dataItemNoList, dateStart, dateEnd):
}
headers = {"Authorization": token}
search_res = requests.post(url=url, headers=headers, json=search_data, timeout=(3, 5))
search_res = requests.post(
url=url, headers=headers, json=search_data, timeout=(3, 5))
search_value = json.loads(search_res.text)["data"]
if search_value:
return search_value
else:
return None
def save_queryDataListItemNos_xls(data_df,dataItemNoList):
def save_queryDataListItemNos_xls(data_df, dataItemNoList):
from datetime import datetime, timedelta
current_year_month = datetime.now().strftime('%Y-%m')
grouped = data_df.groupby("dataDate")
@@ -581,11 +588,11 @@ def save_queryDataListItemNos_xls(data_df,dataItemNoList):
# 创建新工作簿
new_workbook = load_workbook('液化气数据.xlsx')
for sheetname in workbook.sheetnames:
sheet = workbook[sheetname]
new_sheet = new_workbook[sheetname]
current_year_month_row = 0
# 查找当前月份数据起始行
for row_idx, row in enumerate(sheet.iter_rows(values_only=True), 1):
@@ -599,14 +606,14 @@ def save_queryDataListItemNos_xls(data_df,dataItemNoList):
new_sheet.cell(row=row_idx, column=1, value=date)
for j, dataItemNo in enumerate(dataItemNoList, start=2):
if group[group["dataItemNo"] == dataItemNo]["dataValue"].values:
new_sheet.cell(row=row_idx, column=j,
value=group[group["dataItemNo"] == dataItemNo]["dataValue"].values[0])
new_sheet.cell(row=row_idx, column=j,
value=group[group["dataItemNo"] == dataItemNo]["dataValue"].values[0])
# 保存修改后的xlsx文件
new_workbook.save("液化气数据.xlsx")
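save_queryDataListItemNos_xls writes the month's values cell by cell with openpyxl, one column per dataItemNo in list order. A compact sketch of that write pattern on a throwaway workbook, with hypothetical item numbers:
from openpyxl import Workbook

wb = Workbook()
ws = wb.active
ws.append(['Date', 'ITEM_A', 'ITEM_B'])  # hypothetical item numbers as column headers

rows = {'2025-07-07': {'ITEM_A': 4300, 'ITEM_B': 98.5},
        '2025-07-08': {'ITEM_A': 4315, 'ITEM_B': 99.1}}
item_list = ['ITEM_A', 'ITEM_B']

for row_idx, (date, values) in enumerate(rows.items(), start=2):
    ws.cell(row=row_idx, column=1, value=date)
    for j, item in enumerate(item_list, start=2):   # same column layout as the script
        ws.cell(row=row_idx, column=j, value=values[item])

wb.save('demo.xlsx')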
def queryDataListItemNos(date=None,token=None):
def queryDataListItemNos(date=None, token=None):
df = pd.read_excel('液化气数据.xlsx')
dataItemNoList = df.iloc[0].tolist()[1:]
if token is None:
@@ -624,17 +631,17 @@ def queryDataListItemNos(date=None,token=None):
# 格式化为 YYYYMMDD 格式
dateEnd = current_date.strftime('%Y%m%d')
dateStart = first_day_of_month.strftime('%Y%m%d')
search_value = get_queryDataListItemNos_value(token, queryDataListItemNos_url, dataItemNoList, dateStart, dateEnd)
# dateStart = '20250604'
search_value = get_queryDataListItemNos_value(
token, queryDataListItemNos_url, dataItemNoList, dateStart, dateEnd)
data_df = pd.DataFrame(search_value)
data_df["dataDate"] = pd.to_datetime(data_df["dataDate"])
data_df["dataDate"] = data_df["dataDate"].dt.strftime('%Y-%m-%d')
save_queryDataListItemNos_xls(data_df,dataItemNoList)
save_queryDataListItemNos_xls(data_df, dataItemNoList)
print('当月数据更新完成')
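queryDataListItemNos queries the month-to-date window with both endpoints formatted as YYYYMMDD. The first_day_of_month line is not in this hunk; `replace(day=1)` below is an assumed equivalent, not necessarily the author's code:
from datetime import datetime

current_date = datetime(2025, 7, 8)
first_day_of_month = current_date.replace(day=1)    # assumed equivalent of the elided line
dateStart = first_day_of_month.strftime('%Y%m%d')   # '20250701'
dateEnd = current_date.strftime('%Y%m%d')           # '20250708'
print(dateStart, dateEnd)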
def main(start_date=None,token=None,token_push=None):
def main(start_date=None, token=None, token_push=None):
from datetime import datetime, timedelta
if start_date is None:
start_date = datetime.now()
@@ -646,7 +653,7 @@ def main(start_date=None,token=None,token_push=None):
print(date)
try:
# 更新当月数据
queryDataListItemNos(start_date,token)
queryDataListItemNos(start_date, token)
except:
print('当月数据更新失败,单日更新')
start(date)
@@ -654,10 +661,18 @@ def main(start_date=None,token=None,token_push=None):
# start(date)
# 训练模型
optimize_Model()
# # 预测&上传预测结果
upload_data_to_system(token_push,start_date)
# 预测&上传预测结果
upload_data_to_system(token_push, start_date)
# forecast_price()
if __name__ == "__main__":
print("运行中ing...")
main()
# 遍历2024-11-25 到 2024-12-3 之间的工作日日期
# for i_time in pd.date_range('2025-7-1', '2025-7-8', freq='D'):
# # try:
# print(i_time)
# main(start_date=i_time)
# except Exception as e:
# continue
main()
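The commented-out backfill loop above has its try and except halves at mismatched comment levels, so it would not run if uncommented as-is. A working shape, assuming it is re-enabled inside this script where main is defined:
import pandas as pd

# Walk each day in the range and run the full pipeline, skipping days that fail
for i_time in pd.date_range('2025-7-1', '2025-7-8', freq='D'):
    try:
        print(i_time)
        main(start_date=i_time)
    except Exception as e:
        print(f"{i_time} failed: {e}")
        continue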