丙烯py: add current-month data maintenance functionality

workpc 2025-07-08 11:25:31 +08:00
parent 91387a7241
commit d529d0cee6
3 changed files with 244 additions and 159 deletions


@@ -1,3 +1,31 @@
from statsmodels.tools.eval_measures import mse, rmse
from pandas import Series, DataFrame
import cufflinks as cf
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import pickle
import warnings
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_absolute_error
from xgboost import plot_importance, plot_tree
import xgboost as xgb
import plotly.graph_objects as go
import plotly.express as px
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
import statsmodels.api as sm
import datetime
from xgboost import XGBRegressor
from sklearn.linear_model import Lasso
import sklearn.datasets as datasets
from sklearn import preprocessing
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly import __version__
import random
import seaborn as sn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import json
import xlrd
@@ -7,6 +35,7 @@ import time
# Variable definitions
login_url = "http://10.200.32.39/jingbo-api/api/server/login"
search_url = "http://10.200.32.39/jingbo-api/api/warehouse/dwDataItem/queryByItemNos"
queryDataListItemNos_url = "http://10.200.32.39/jingbo-api//api/warehouse/dwDataItem/queryDataListItemNos"
login_push_url = "http://10.200.32.39/jingbo-api/api/server/login"
upload_url = "http://10.200.32.39/jingbo-api/api/dw/dataValue/pushDataValueList"
@@ -37,67 +66,24 @@ read_file_path_name = "丙烯基础数据收集表.xls"
one_cols = []
two_cols = []
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sn
import random
import time
from plotly import __version__
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from sklearn import preprocessing
from pandas import Series,DataFrame
import matplotlib.pyplot as plt
import sklearn.datasets as datasets
# Import machine-learning models
from sklearn.linear_model import Lasso
from xgboost import XGBRegressor
import datetime
import statsmodels.api as sm
from keras.preprocessing.sequence import TimeseriesGenerator
import plotly.express as px
import plotly.graph_objects as go
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.metrics import mean_absolute_error
from statsmodels.tools.eval_measures import mse,rmse
from sklearn.model_selection import GridSearchCV
from xgboost import XGBRegressor
import warnings
import pickle
from sklearn.metrics import mean_squared_error
# Split the data into training and test sets
from sklearn.model_selection import train_test_split
# Used for model scoring
from sklearn.metrics import r2_score
le = preprocessing.LabelEncoder()
# print(__version__) # requires version >= 1.9.0
import cufflinks as cf
cf.go_offline()
random.seed(100)
# Data retrieval
def get_head_auth():
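# Logs in against login_url and returns the access token that later queries send in the Authorization header (symmetric to get_head_push_auth below).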
@@ -121,7 +107,8 @@ def get_data_value(token, dataItemNoList):
"funcOperation": "查询"
}
headers = {"Authorization": token}
search_res = requests.post(url=search_url, headers=headers, json=search_data, timeout=(3, 5))
search_res = requests.post(
url=search_url, headers=headers, json=search_data, timeout=(3, 5))
search_value = json.loads(search_res.text)["data"]
if search_value:
return search_value
@@ -166,7 +153,8 @@ def get_cur_time():
def get_head_push_auth():
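# Logs in against login_push_url and returns the accessToken for the push API, or None if the login fails.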
login_res = requests.post(url=login_push_url, json=login_push_data, timeout=(3, 5))
login_res = requests.post(
url=login_push_url, json=login_push_data, timeout=(3, 5))
text = json.loads(login_res.text)
if text["status"]:
token = text["data"]["accessToken"]
@@ -176,7 +164,6 @@ def get_head_push_auth():
return None
def upload_data_to_system(token_push):
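# POSTs the prepared data payload to upload_url with token_push in the Authorization header and prints the response.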
data = {
"funcModule": "数据表信息列表",
@@ -192,7 +179,8 @@ def upload_data_to_system(token_push):
]
}
headers = {"Authorization": token_push}
res = requests.post(url=upload_url, headers=headers, json=data, timeout=(3, 5))
res = requests.post(url=upload_url, headers=headers,
json=data, timeout=(3, 5))
print(res.text)
@@ -214,12 +202,12 @@ def upload_data_to_system(token_push):
# print(res.text)
def forecast_price():
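# Reads 丙烯基础数据收集表.xlsx, fills missing values, loads the pickled best model and returns today's predicted price rounded to two decimals.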
# df_test = pd.read_csv('定价模型数据收集0212.csv')
df_test = pd.read_excel('丙烯基础数据收集表.xls')
df_test = pd.read_excel('丙烯基础数据收集表.xlsx')
df_test.drop([0], inplace=True)
df_test['Date']=pd.to_datetime(df_test['Date'], format='%m/%d/%Y',infer_datetime_format=True)
df_test['Date'] = pd.to_datetime(
df_test['Date'], format='%Y-%m-%d', infer_datetime_format=True)
# Fill missing values with the previous or next observation
df_test_1 = df_test
@@ -229,10 +217,6 @@ def forecast_price():
# Select the column names used for model training
col_for_training = df_test_1.columns
import joblib
Best_model_DalyLGPrice = joblib.load("日度价格预测_丙烯最佳模型.pkl")
# The most recent day is the last row of the data
@@ -243,6 +227,8 @@ def forecast_price():
df_test_1_Day = df_test_1_Day.drop(["Date"], axis=1)
df_test_1_Day = df_test_1_Day.drop('Price', axis=1)
df_test_1_Day = df_test_1_Day.dropna()
# Convert the data types
df_test_1_Day = df_test_1_Day.astype(float)
# df_test_1_Day
# Predict today's price, shown to two decimal places
@@ -255,6 +241,8 @@ def forecast_price():
a = float(a)
a = round(a, 2)
return a
def optimize_Model():
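# Re-reads the collection workbook, cleans missing values, fits Lasso and XGBoost (tuned with GridSearchCV), compares RMSE/R^2 and pickles the best model to 日度价格预测_丙烯最佳模型.pkl.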
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
@@ -265,17 +253,20 @@ def optimize_Model():
pd.set_option('display.max_rows', 40)
pd.set_option('display.max_columns', 40)
df_test = pd.read_excel('丙烯基础数据收集表.xls')
df_test = pd.read_excel('丙烯基础数据收集表.xlsx')
df_test.drop([0], inplace=True)
df_test['Date']=pd.to_datetime(df_test['Date'], format='%m/%d/%Y',infer_datetime_format=True)
df_test['Date'] = pd.to_datetime(
df_test['Date'], format='%Y-%m-%d', infer_datetime_format=True)
# Count the missing values of each feature
MisVal_Check = df_test.isnull().sum().sort_values(ascending=False)
# Drop features whose missing-value ratio exceeds 0.4; the resulting table is named df_test_1
df_MisVal_Check = pd.DataFrame(MisVal_Check,)#
df_MisVal_Check = pd.DataFrame(MisVal_Check,)
df_MisVal_Check_1 = df_MisVal_Check.reset_index()
df_MisVal_Check_1.columns = ['Variable_Name', 'Missing_Number']
df_MisVal_Check_1['Missing_Number']=df_MisVal_Check_1['Missing_Number']/len(df_test)
df_test_1=df_test.drop(df_MisVal_Check_1[df_MisVal_Check_1['Missing_Number']>0.4].Variable_Name,axis = 1)
df_MisVal_Check_1['Missing_Number'] = df_MisVal_Check_1['Missing_Number'] / \
len(df_test)
df_test_1 = df_test.drop(
df_MisVal_Check_1[df_MisVal_Check_1['Missing_Number'] > 0.4].Variable_Name, axis=1)
# Fill missing values with the previous or next observation
df_test_1 = df_test
df_test_1 = df_test_1.fillna(df_test.ffill())
@@ -298,7 +289,7 @@ def optimize_Model():
from datetime import datetime
import statsmodels.api as sm
from keras.preprocessing.sequence import TimeseriesGenerator
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
import plotly.express as px
import plotly.graph_objects as go
@@ -330,7 +321,8 @@ def optimize_Model():
target = y
# Split the sample data into training and test sets
X_train,x_test,y_train,y_true = train_test_split(train,target,test_size=0.2,random_state=0)
X_train, x_test, y_train, y_true = train_test_split(
train, target, test_size=0.2, random_state=0)
# Model aliases
Lasso = Lasso(random_state=0)
@@ -361,14 +353,17 @@ def optimize_Model():
model_results1
# Define plot_feature_importance, which computes feature importances; this part of the code needs no adjustment
def plot_feature_importance(importance, names, model_type):
feature_importance = np.array(importance)
feature_names = np.array(names)
data={'feature_names':feature_names,'feature_importance':feature_importance}
data = {'feature_names': feature_names,
'feature_importance': feature_importance}
fi_df = pd.DataFrame(data)
fi_df.sort_values(by=['feature_importance'], ascending=False,inplace=True)
fi_df.sort_values(by=['feature_importance'],
ascending=False, inplace=True)
plt.figure(figsize=(10, 8))
sn.barplot(x=fi_df['feature_importance'], y=fi_df['feature_names'])
@@ -378,7 +373,7 @@ def optimize_Model():
plt.ylabel('FEATURE NAMES')
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['SimHei']
## XGBoost model parameter tuning - first pass
# XGBoost model parameter tuning - first pass
# Reference: https://juejin.im/post/6844903661013827598
# For each tuning pass, set the candidate values within the same order of magnitude as 1, 3, 10 (e.g. 1, 3, 10, or 0.1, 0.3, 1.0, or 0.01, 0.03, 0.10)
@@ -417,7 +412,7 @@ def optimize_Model():
columns=['模型(Model)', '均方根误差(RMSE)', 'R^2 score'])
model_results2 = model_results2.set_index('模型(Model)')
results = model_results1.append(model_results2, ignore_index = False)
results = pd.concat([model_results1, model_results2], ignore_index=False)
import pickle
Pkl_Filename = "日度价格预测_丙烯最佳模型.pkl"
@@ -426,6 +421,96 @@ def optimize_Model():
pickle.dump(grid_search_XGB, file)
def queryDataListItemNos(token=None):
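# Current-month data maintenance: reads the data item codes from the first row of the workbook, queries their values from the 1st of the month through today, and writes them back via save_queryDataListItemNos_xls.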
df = pd.read_excel('丙烯基础数据收集表.xlsx')
dataItemNoList = df.iloc[0].tolist()[1:]
if token is None:
token = get_head_auth()
if not token:
print('token获取失败')
return
# Get the current date
from datetime import datetime, timedelta
current_date = datetime.now()
# Get the 1st day of the current month
first_day_of_month = current_date.replace(day=1)
# Format as YYYYMMDD
dateEnd = current_date.strftime('%Y%m%d')
dateStart = first_day_of_month.strftime('%Y%m%d')
# dateStart = '20241026'
search_value = get_queryDataListItemNos_value(
token, queryDataListItemNos_url, dataItemNoList, dateStart, dateEnd)
data_df = pd.DataFrame(search_value)
data_df["dataDate"] = pd.to_datetime(data_df["dataDate"])
data_df["dataDate"] = data_df["dataDate"].dt.strftime('%Y-%m-%d')
save_queryDataListItemNos_xls(data_df, dataItemNoList)
print('当月数据更新完成')
def save_queryDataListItemNos_xls(data_df, dataItemNoList):
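# Groups the queried values by date and rewrites the current month's rows of the first sheet in 丙烯基础数据收集表.xlsx, one column per data item code.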
from datetime import datetime, timedelta
current_year_month = datetime.now().strftime('%Y-%m')
grouped = data_df.groupby("dataDate")
# Open the xlsx file with openpyxl
from openpyxl import load_workbook
workbook = load_workbook('丙烯基础数据收集表.xlsx')
# Create a new workbook
new_workbook = load_workbook('丙烯基础数据收集表.xlsx')
for sheetname in workbook.sheetnames:
sheet = workbook[sheetname]
new_sheet = new_workbook[sheetname]
current_year_month_row = 0
# Count the rows already holding current-month data (used to locate the starting row)
for row_idx, row in enumerate(sheet.iter_rows(values_only=True), 1):
if str(row[0]).startswith(current_year_month):
current_year_month_row += 1
# Append the new data
if sheetname == workbook.sheetnames[0]:
start_row = sheet.max_row - current_year_month_row + 1
for row_idx, (date, group) in enumerate(grouped, start=start_row):
new_sheet.cell(row=row_idx, column=1, value=date)
for j, dataItemNo in enumerate(dataItemNoList, start=2):
if group[group["dataItemNo"] == dataItemNo]["dataValue"].values:
new_sheet.cell(row=row_idx, column=j,
value=group[group["dataItemNo"] == dataItemNo]["dataValue"].values[0])
# Save the modified xlsx file
new_workbook.save("丙烯基础数据收集表.xlsx")
def get_queryDataListItemNos_value(token, url, dataItemNoList, dateStart, dateEnd):
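# POSTs the item codes and date range to the query endpoint and returns the "data" field of the response, or None if it is empty.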
search_data = {
"funcModule": "数据项",
"funcOperation": "查询",
"data": {
"dateStart": dateStart,
"dateEnd": dateEnd,
"dataItemNoList": dataItemNoList # 数据项编码,代表 brent最低价和最高价
}
}
headers = {"Authorization": token}
search_res = requests.post(
url=url, headers=headers, json=search_data, timeout=(3, 5))
search_value = json.loads(search_res.text)["data"]
if search_value:
return search_value
else:
return None
def read_xls_data():
@@ -456,8 +541,6 @@ def read_xls_data():
# workbook.close()
def start():
read_xls_data()
@@ -480,7 +563,8 @@ def start():
print(data_value)
dataItemNo_dataValue[data_value["dataItemNo"]] = ""
else:
dataItemNo_dataValue[data_value["dataItemNo"]] = data_value["dataValue"]
dataItemNo_dataValue[data_value["dataItemNo"]
] = data_value["dataValue"]
for value in one_cols[1:]:
if value in dataItemNo_dataValue:
@@ -501,7 +585,6 @@ def start_1():
if not token:
return
datas = get_data_value(token, one_cols[1:])
# if not datas:
# return
@@ -514,7 +597,8 @@ def start_1():
print(data_value)
dataItemNo_dataValue[data_value["dataItemNo"]] = ""
else:
dataItemNo_dataValue[data_value["dataItemNo"]] = data_value["dataValue"]
dataItemNo_dataValue[data_value["dataItemNo"]
] = data_value["dataValue"]
for value in one_cols[1:]:
if value in dataItemNo_dataValue:
@@ -523,10 +607,10 @@ def start_1():
append_rows.append("")
save_xls_1(append_rows)
# data_list.append(three_cols)
# write_xls(data_list)
def save_xls_1(append_rows):
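# Writes the newly collected row of values (append_rows) into 丙烯基础数据收集表.xls and saves the workbook.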
# Open the xls file
@@ -571,8 +655,6 @@ def save_xls_1(append_rows):
new_workbook.save("丙烯基础数据收集表.xls")
def check_data(dataItemNo):
token = get_head_auth()
if not token:
@@ -628,5 +710,8 @@ def save_xls(append_rows):
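# Daily entry point: refresh the current month's data, re-optimize the model, forecast today's price and push it to the data warehouse.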
if __name__ == "__main__":
start()
# start()
queryDataListItemNos()
optimize_Model()
forecast_price()
upload_data_to_system(token_push=get_head_push_auth())