{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From C:\\Users\\EDY\\AppData\\Roaming\\Python\\Python311\\site-packages\\keras\\src\\losses.py:2976: The name tf.losses.sparse_softmax_cross_entropy is deprecated. Please use tf.compat.v1.losses.sparse_softmax_cross_entropy instead.\n",
"\n"
]
},
{
"data": {
"text/html": [
" <script type=\"text/javascript\">\n",
" window.PlotlyConfig = {MathJaxConfig: 'local'};\n",
" if (window.MathJax && window.MathJax.Hub && window.MathJax.Hub.Config) {window.MathJax.Hub.Config({SVG: {font: \"STIX-Web\"}});}\n",
" if (typeof require !== 'undefined') {\n",
" require.undef(\"plotly\");\n",
" requirejs.config({\n",
" paths: {\n",
" 'plotly': ['https://cdn.plot.ly/plotly-2.12.1.min']\n",
" }\n",
" });\n",
" require(['plotly'], function(Plotly) {\n",
" window._Plotly = Plotly;\n",
" });\n",
" }\n",
" </script>\n",
" "
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import requests\n",
"import json\n",
"import xlrd\n",
"import xlwt\n",
"from datetime import datetime\n",
"import time\n",
"# 变量定义\n",
"login_url = \"http://10.200.32.39/jingbo-api/api/server/login\"\n",
"search_url = \"http://10.200.32.39/jingbo-api/api/warehouse/dwDataItem/queryByItemNos\"\n",
"\n",
"login_push_url = \"http://10.200.32.39/jingbo-api/api/server/login\"\n",
"upload_url = \"http://10.200.32.39/jingbo-api/api/dw/dataValue/pushDataValueList\"\n",
"\n",
"login_data = {\n",
" \"data\": {\n",
" \"account\": \"api_dev\",\n",
" \"password\": \"ZTEwYWRjMzk0OWJhNTlhYmJlNTZlMDU3ZjIwZjg4M2U=\",\n",
" \"tenantHashCode\": \"8a4577dbd919675758d57999a1e891fe\",\n",
" \"terminal\": \"API\"\n",
" },\n",
" \"funcModule\": \"API\",\n",
" \"funcOperation\": \"获取token\"\n",
"}\n",
"\n",
"login_push_data = {\n",
" \"data\": {\n",
" \"account\": \"api_dev\",\n",
" \"password\": \"ZTEwYWRjMzk0OWJhNTlhYmJlNTZlMDU3ZjIwZjg4M2U=\",\n",
" \"tenantHashCode\": \"8a4577dbd919675758d57999a1e891fe\",\n",
" \"terminal\": \"API\"\n",
" },\n",
" \"funcModule\": \"API\",\n",
" \"funcOperation\": \"获取token\"\n",
"}\n",
"\n",
"read_file_path_name = \"沥青数据项.xls\"\n",
"one_cols = []\n",
"two_cols = []\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import seaborn as sn\n",
"import random\n",
"import time\n",
"\n",
"\n",
"\n",
"\n",
"from plotly import __version__\n",
"from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n",
"\n",
"from sklearn import preprocessing\n",
"\n",
"from pandas import Series,DataFrame\n",
"\n",
"import matplotlib.pyplot as plt\n",
"\n",
"import sklearn.datasets as datasets\n",
"\n",
"#导入机器学习算法模型\n",
"from sklearn.linear_model import Lasso\n",
"from xgboost import XGBRegressor\n",
"\n",
"import datetime\n",
"import statsmodels.api as sm\n",
"# from keras.preprocessing.sequence import TimeseriesGenerator\n",
"from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator\n",
"\n",
"import plotly.express as px\n",
"import plotly.graph_objects as go\n",
"\n",
"import xgboost as xgb\n",
"from xgboost import plot_importance, plot_tree\n",
"from sklearn.metrics import mean_absolute_error\n",
"from statsmodels.tools.eval_measures import mse,rmse\n",
"from sklearn.model_selection import GridSearchCV\n",
"from xgboost import XGBRegressor\n",
"import warnings\n",
"import pickle\n",
"\n",
"from sklearn.metrics import mean_squared_error\n",
"\n",
"#切割训练数据和样本数据\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"#用于模型评分\n",
"from sklearn.metrics import r2_score\n",
"\n",
"le = preprocessing.LabelEncoder()\n",
"\n",
"# print(__version__) # requires version >= 1.9.0\n",
"\n",
"\n",
"import cufflinks as cf\n",
"cf.go_offline()\n",
"\n",
"random.seed(100)\n",
"\n",
"%matplotlib inline\n",
"\n",
"# 数据获取\n",
"\n",
"def get_head_auth():\n",
" login_res = requests.post(url=login_url, json=login_data, timeout=(3, 5))\n",
" text = json.loads(login_res.text)\n",
" if text[\"status\"]:\n",
" token = text[\"data\"][\"accessToken\"]\n",
" return token\n",
" else:\n",
" print(\"获取认证失败\")\n",
" return None\n",
"\n",
"\n",
"def get_data_value(token, dataItemNoList,date=''):\n",
"\n",
" search_data = {\n",
" \"data\": {\n",
" \"date\": get_cur_time(date)[0],\n",
" \"dataItemNoList\": dataItemNoList\n",
" },\n",
" \"funcModule\": \"数据项\",\n",
" \"funcOperation\": \"查询\"\n",
" }\n",
" headers = {\"Authorization\": token}\n",
" search_res = requests.post(url=search_url, headers=headers, json=search_data, timeout=(3, 5))\n",
" search_value = json.loads(search_res.text)[\"data\"]\n",
" if search_value:\n",
" return search_value\n",
" else:\n",
" print(\"今天没有新数据\")\n",
" return None\n",
"\n",
"\n",
"# xls文件处理\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"def write_xls(data,date):\n",
" # 创建一个Workbook对象\n",
" workbook = xlwt.Workbook()\n",
"\n",
" # 创建一个Sheet对象, 可指定名称\n",
" sheet = workbook.load('Sheet1')\n",
"\n",
" # 写入数据行\n",
" for row_index, row_data in enumerate(data):\n",
" for col_index, cell_data in enumerate(row_data):\n",
" sheet.write(row_index, col_index, cell_data)\n",
"\n",
" # 保存Workbook到文件\n",
" workbook.save(get_cur_time(date)[0] + '.xls')\n",
"\n",
"\n",
"def get_cur_time(date = ''):\n",
" if date == '':\n",
" import datetime\n",
" now = datetime.datetime.now()\n",
" else:\n",
" now = date\n",
" year = now.year\n",
" month = now.month\n",
" day = now.day\n",
"\n",
" if month < 10:\n",
" month = \"0\" + str(month)\n",
" if day < 10:\n",
" day = \"0\" + str(day)\n",
" cur_time = str(year) + str(month) + str(day)\n",
" cur_time2 = str(year) + \"-\" + str(month) + \"-\" + str(day)\n",
"# cur_time = '20231007'\n",
"# cur_time2 = '2023-10-07'\n",
" return cur_time, cur_time2\n",
"\n",
"\n",
"def get_head_push_auth():\n",
" login_res = requests.post(url=login_push_url, json=login_push_data, timeout=(3, 5))\n",
" text = json.loads(login_res.text)\n",
" if text[\"status\"]:\n",
" token = text[\"data\"][\"accessToken\"]\n",
" return token\n",
" else:\n",
" print(\"获取认证失败\")\n",
" return None\n",
"\n",
"\n",
"\n",
"def upload_data_to_system(token_push,date):\n",
" data = {\n",
" \"funcModule\": \"数据表信息列表\",\n",
" \"funcOperation\": \"新增\",\n",
" \"data\": [\n",
" {\"dataItemNo\": \"C01100036|Forecast_Price|ACN\",\n",
" \"dataDate\": get_cur_time(date)[0],\n",
" \"dataStatus\": \"add\",\n",
" \"dataValue\": forecast_price()\n",
" }\n",
"\n",
" ]\n",
" }\n",
" headers = {\"Authorization\": token_push}\n",
" res = requests.post(url=upload_url, headers=headers, json=data, timeout=(3, 5))\n",
" print(res.text)\n",
"\n",
" \n",
"# def upload_data_to_system(token):\n",
"# data = {\n",
"# \"funcModule\": \"数据表信息列表\",\n",
"# \"funcOperation\": \"新增\",\n",
"# \"data\": [\n",
"# {\"dataItemNo\": \"C01100036|Forecast_ Price|ACN\",\n",
"# \"dataDate\": '20230706',\n",
"# \"dataStatus\": \"add\",\n",
"# \"dataValue\": 3780.0\n",
"# }\n",
"\n",
"# ]\n",
"# }\n",
"# headers = {\"Authorization\": token}\n",
"# res = requests.post(url=upload_url, headers=headers, json=data, timeout=(3, 5))\n",
"# print(res.text)\n",
"\n",
"def forecast_price():\n",
" df_test = pd.read_excel('沥青数据项.xls',sheet_name='数据项历史数据')\n",
" df_test.drop([0],inplace=True)\n",
" df_test[['汽油执行价','柴油执行价','齐鲁石化销量','齐鲁石化产量','齐鲁石化成交价','齐鲁石化库存','科力达销量',\n",
" '科力达产量','科力达成交价','科力达库存','弘润销量','弘润产量','弘润成交价','弘润库存','市场成交价','京博指导价',\n",
" '布伦特上周收盘价','布伦特昨日收盘价','布伦特收盘价','上期所沥青主力合约','隆重资讯沥青日开工率','隆重资讯沥青月库存',\n",
" '隆重资讯沥青月产量','隆重资讯沥青表观消费量','隆重资讯社会库存率','厂区库容','京博提货量','即期成本','异地库库存',\n",
" '京博签单量','京博库存量','京博产量','加权平均成交价']] = df_test[['汽油执行价','柴油执行价','齐鲁石化销量',\n",
" '齐鲁石化产量','齐鲁石化成交价','齐鲁石化库存','科力达销量',\n",
" '科力达产量','科力达成交价','科力达库存','弘润销量','弘润产量','弘润成交价','弘润库存','市场成交价','京博指导价',\n",
" '布伦特上周收盘价','布伦特昨日收盘价','布伦特收盘价','上期所沥青主力合约','隆重资讯沥青日开工率','隆重资讯沥青月库存',\n",
" '隆重资讯沥青月产量','隆重资讯沥青表观消费量','隆重资讯社会库存率','厂区库容','京博提货量','即期成本','异地库库存',\n",
" '京博签单量','京博库存量','京博产量','加权平均成交价']].astype('float')\n",
" # df_test['日期']=pd.to_datetime(df_test['日期'], format='%d/%m/%Y',infer_datetime_format=True)\n",
" df_test['日期']=pd.to_datetime(df_test['日期'], format='%Y-%m-%d',infer_datetime_format=True)\n",
"\n",
" #查看每个特征缺失值数量\n",
" MisVal_Check=df_test.isnull().sum().sort_values(ascending=False)\n",
" #去掉缺失值百分比>0.4的特征, 去掉这些特征后的新表格命名为df_test_1\n",
" df_MisVal_Check = pd.DataFrame(MisVal_Check,)#\n",
" df_MisVal_Check_1=df_MisVal_Check.reset_index()\n",
" df_MisVal_Check_1.columns=['Variable_Name','Missing_Number'] \n",
" df_MisVal_Check_1['Missing_Number']=df_MisVal_Check_1['Missing_Number']/len(df_test)\n",
" df_test_1=df_test.drop(df_MisVal_Check_1[df_MisVal_Check_1['Missing_Number']>0.4].Variable_Name,axis = 1)\n",
"\n",
" #将缺失值补为前一个或者后一个数值\n",
" df_test_1=df_test_1.fillna(df_test.ffill())\n",
" df_test_1=df_test_1.fillna(df_test_1.bfill())\n",
"\n",
" # 选择用于模型训练的列名称\n",
" col_for_training = df_test_1.columns\n",
" import joblib\n",
" Best_model_DalyLGPrice = joblib.load(\"日度价格预测_最佳模型.pkl\")\n",
" # 最新的一天为最后一行的数据\n",
" df_test_1_Day = df_test_1.tail(1)\n",
" # 移除不需要的列\n",
" df_test_1_Day.index = df_test_1_Day[\"日期\"]\n",
" df_test_1_Day = df_test_1_Day.drop([\"日期\"], axis= 1)\n",
" df_test_1_Day=df_test_1_Day.drop('京博指导价',axis=1)\n",
" df_test_1_Day=df_test_1_Day.dropna()\n",
"\n",
" # df_test_1_Day\n",
" #预测今日价格,显示至小数点后两位\n",
" Ypredict_Today=Best_model_DalyLGPrice.predict(df_test_1_Day)\n",
"\n",
" df_test_1_Day['日度预测价格']=Ypredict_Today\n",
" print(df_test_1_Day['日度预测价格'])\n",
" a = df_test_1_Day['日度预测价格']\n",
" a = a[0]\n",
" a = float(a)\n",
" a = round(a,2)\n",
" return a\n",
"def optimize_Model():\n",
" from sklearn.model_selection import train_test_split\n",
" from sklearn.impute import SimpleImputer\n",
" from sklearn.preprocessing import OrdinalEncoder\n",
" from sklearn.feature_selection import SelectFromModel\n",
" from sklearn.metrics import mean_squared_error, r2_score\n",
"\n",
" pd.set_option('display.max_rows',40) \n",
" pd.set_option('display.max_columns',40) \n",
" df_test = pd.read_excel('沥青数据项.xls',sheet_name='数据项历史数据')\n",
" df_test.drop([0],inplace=True)\n",
" df_test[['汽油执行价','柴油执行价','齐鲁石化销量','齐鲁石化产量','齐鲁石化成交价','齐鲁石化库存','科力达销量',\n",
" '科力达产量','科力达成交价','科力达库存','弘润销量','弘润产量','弘润成交价','弘润库存','市场成交价','京博指导价',\n",
" '布伦特上周收盘价','布伦特昨日收盘价','布伦特收盘价','上期所沥青主力合约','隆重资讯沥青日开工率','隆重资讯沥青月库存',\n",
" '隆重资讯沥青月产量','隆重资讯沥青表观消费量','隆重资讯社会库存率','厂区库容','京博提货量','即期成本','异地库库存',\n",
" '京博签单量','京博库存量','京博产量','加权平均成交价']] = df_test[['汽油执行价','柴油执行价','齐鲁石化销量','齐鲁石化产量','齐鲁石化成交价','齐鲁石化库存','科力达销量',\n",
" '科力达产量','科力达成交价','科力达库存','弘润销量','弘润产量','弘润成交价','弘润库存','市场成交价','京博指导价',\n",
" '布伦特上周收盘价','布伦特昨日收盘价','布伦特收盘价','上期所沥青主力合约','隆重资讯沥青日开工率','隆重资讯沥青月库存',\n",
" '隆重资讯沥青月产量','隆重资讯沥青表观消费量','隆重资讯社会库存率','厂区库容','京博提货量','即期成本','异地库库存',\n",
" '京博签单量','京博库存量','京博产量','加权平均成交价']].astype('float')\n",
" # df_test = pd.read_csv('定价模型数据收集20190901-20230615.csv',encoding = 'gbk',engine = 'python')\n",
" # df_test['日期']=pd.to_datetime(df_test['日期'], format='%m/%d/%Y',infer_datetime_format=True)\n",
" df_test['日期']=pd.to_datetime(df_test['日期'], format='%Y-%m-%d',infer_datetime_format=True)\n",
" # df_test.tail(3)\n",
" MisVal_Check=df_test.isnull().sum().sort_values(ascending=False)\n",
" #去掉缺失值百分比>0.4的特征, 去掉这些特征后的新表格命名为df_test_1\n",
" df_MisVal_Check = pd.DataFrame(MisVal_Check,)#\n",
" df_MisVal_Check_1=df_MisVal_Check.reset_index()\n",
" df_MisVal_Check_1.columns=['Variable_Name','Missing_Number'] \n",
" df_MisVal_Check_1['Missing_Number']=df_MisVal_Check_1['Missing_Number']/len(df_test)\n",
" df_test_1=df_test.drop(df_MisVal_Check_1[df_MisVal_Check_1['Missing_Number']>0.4].Variable_Name,axis = 1)\n",
" #将缺失值补为前一个或者后一个数值\n",
" df_test_1=df_test_1.fillna(df_test.ffill())\n",
" df_test_1=df_test_1.fillna(df_test_1.bfill())\n",
" df_test_1[\"日期\"] = pd.to_datetime(df_test_1[\"日期\"])\n",
" df_test_1.index = df_test_1[\"日期\"]\n",
" df_test_1 = df_test_1.drop([\"日期\"], axis= 1)\n",
" dataset1=df_test_1.drop('京博指导价',axis=1)#.astype(float)\n",
"\n",
" y=df_test_1['京博指导价']\n",
"\n",
" x=dataset1 \n",
"\n",
" train = x\n",
" target = y\n",
"\n",
" #切割数据样本集合测试集\n",
" X_train,x_test,y_train,y_true = train_test_split(train,target,test_size=0.2,random_state=0)\n",
" \n",
" \n",
" from sklearn.linear_model import Lasso\n",
" from xgboost import XGBRegressor\n",
"\n",
" from datetime import datetime\n",
" import statsmodels.api as sm\n",
" # from keras.preprocessing.sequence import TimeseriesGenerator\n",
" from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator\n",
"\n",
" import plotly.express as px\n",
" import plotly.graph_objects as go\n",
"\n",
" import xgboost as xgb\n",
" from xgboost import plot_importance, plot_tree\n",
" from sklearn.metrics import mean_absolute_error\n",
" from statsmodels.tools.eval_measures import mse,rmse\n",
" from sklearn.model_selection import GridSearchCV\n",
" from xgboost import XGBRegressor\n",
" import warnings\n",
" import pickle\n",
"\n",
" from sklearn.metrics import mean_squared_error\n",
"\n",
" #切割训练数据和样本数据\n",
" from sklearn.model_selection import train_test_split\n",
"\n",
" #用于模型评分\n",
" from sklearn.metrics import r2_score\n",
"\n",
" #模型缩写\n",
" Lasso = Lasso(random_state=0)\n",
" XGBR = XGBRegressor(random_state=0)\n",
" Lasso.fit(X_train,y_train)\n",
" XGBR.fit(X_train,y_train)\n",
" y_pre_Lasso = Lasso.predict(x_test)\n",
" y_pre_XGBR = XGBR.predict(x_test)\n",
"\n",
" #计算Lasso、XGBR、RandomForestR、AdaBoostR、GradientBoostingR、BaggingRegressor各模型的R²\n",
" Lasso_score = r2_score(y_true,y_pre_Lasso)\n",
" XGBR_score=r2_score(y_true,y_pre_XGBR)\n",
"\n",
" #计算Lasso、XGBR的MSE和RMSE\n",
" Lasso_MSE=mean_squared_error(y_true, y_pre_Lasso)\n",
" XGBR_MSE=mean_squared_error(y_true, y_pre_XGBR)\n",
"\n",
" Lasso_RMSE=np.sqrt(Lasso_MSE)\n",
" XGBR_RMSE=np.sqrt(XGBR_MSE)\n",
" model_results = pd.DataFrame([['Lasso', Lasso_RMSE, Lasso_score],\n",
" ['XgBoost', XGBR_RMSE, XGBR_score]],\n",
" columns = ['模型(Model)','均方根误差(RMSE)', 'R^2 score'])\n",
" model_results1=model_results.set_index('模型(Model)')\n",
"\n",
" def plot_feature_importance(importance,names,model_type):\n",
" feature_importance = np.array(importance)\n",
" feature_names = np.array(names)\n",
"\n",
" data={'feature_names':feature_names,'feature_importance':feature_importance}\n",
" fi_df = pd.DataFrame(data)\n",
"\n",
" fi_df.sort_values(by=['feature_importance'], ascending=False,inplace=True)\n",
"\n",
" plt.figure(figsize=(10,8))\n",
" sn.barplot(x=fi_df['feature_importance'], y=fi_df['feature_names'])\n",
"\n",
" plt.title(model_type + \" \"+'FEATURE IMPORTANCE')\n",
" plt.xlabel('FEATURE IMPORTANCE')\n",
" plt.ylabel('FEATURE NAMES')\n",
" from pylab import mpl\n",
" %pylab\n",
" mpl.rcParams['font.sans-serif'] = ['SimHei']\n",
" from xgboost import XGBRegressor\n",
" from sklearn.model_selection import GridSearchCV\n",
"\n",
" estimator = XGBRegressor(random_state=0,\n",
" nthread=4,\n",
" seed=0\n",
" )\n",
" parameters = {\n",
" 'max_depth': range (2, 11, 2), # 树的最大深度\n",
" 'n_estimators': range (50, 101, 10), # 迭代次数\n",
" 'learning_rate': [0.01, 0.03, 0.1, 0.3, 0.5, 1]\n",
" }\n",
"\n",
" grid_search_XGB = GridSearchCV(\n",
" estimator=estimator,\n",
" param_grid=parameters,\n",
" # n_jobs = 10,\n",
" cv = 3,\n",
" verbose=True\n",
" )\n",
"\n",
" grid_search_XGB.fit(X_train, y_train)\n",
" print(\"Best score: %0.3f\" % grid_search_XGB.best_score_)\n",
" print(\"Best parameters set:\")\n",
" best_parameters = grid_search_XGB.best_estimator_.get_params()\n",
" for param_name in sorted(parameters.keys()):\n",
" print(\"\\t%s: %r\" % (param_name, best_parameters[param_name]))\n",
" y_pred = grid_search_XGB.predict(x_test)\n",
"\n",
" op_XGBR_score = r2_score(y_true,y_pred)\n",
" op_XGBR_MSE= mean_squared_error(y_true, y_pred)\n",
" op_XGBR_RMSE= np.sqrt(op_XGBR_MSE)\n",
"\n",
" model_results2 = pd.DataFrame([['Optimized_Xgboost', op_XGBR_RMSE, op_XGBR_score]],\n",
" columns = ['模型(Model)', '均方根误差(RMSE)', 'R^2 score'])\n",
" model_results2=model_results2.set_index('模型(Model)')\n",
"\n",
" # results = model_results1.append(model_results2, ignore_index = False)\n",
" results = pd.concat([model_results1,model_results2],ignore_index=True)\n",
" results\n",
" import pickle\n",
"\n",
" Pkl_Filename = \"日度价格预测_最佳模型.pkl\" \n",
"\n",
" with open(Pkl_Filename, 'wb') as file: \n",
" pickle.dump(grid_search_XGB, file)\n",
"\n",
"\n",
"\n",
"\n",
" \n",
"\n",
"\n",
" \n",
"\n",
"def read_xls_data():\n",
" global one_cols, two_cols\n",
" # 打开 XLS 文件\n",
" workbook = xlrd.open_workbook(read_file_path_name)\n",
"\n",
" # 获取所有表格名称\n",
" # sheet_names = workbook.sheet_names()\n",
"\n",
" # 选择第一个表格\n",
" sheet = workbook.sheet_by_index(0)\n",
"\n",
" # 获取行数和列数\n",
" num_rows = sheet.nrows\n",
" # num_cols = sheet.ncols\n",
"\n",
" # 遍历每一行,获取单元格数据\n",
" # for i in range(num_rows):\n",
" # row_data = sheet.row_values(i)\n",
" # one_cols.append(row_data)\n",
" # two_cols.append(row_data[1])\n",
"\n",
" row_data = sheet.row_values(1)\n",
" one_cols = row_data\n",
"\n",
" # 关闭 XLS 文件\n",
" # workbook.close()\n",
"\n",
"\n",
"\n",
"\n",
"def start():\n",
" '''预测上传数据'''\n",
" read_xls_data()\n",
"\n",
" token = get_head_auth()\n",
" if not token:\n",
" return\n",
" token_push = get_head_push_auth()\n",
" if not token_push:\n",
" return\n",
"\n",
" datas = get_data_value(token, one_cols[1:])\n",
" if not datas:\n",
" return\n",
"\n",
" # data_list = [two_cols, one_cols]\n",
" append_rows = [get_cur_time()[1]]\n",
" dataItemNo_dataValue = {}\n",
" for data_value in datas:\n",
" if \"dataValue\" not in data_value:\n",
" print(data_value)\n",
" dataItemNo_dataValue[data_value[\"dataItemNo\"]] = \"\"\n",
" else:\n",
" dataItemNo_dataValue[data_value[\"dataItemNo\"]] = data_value[\"dataValue\"]\n",
" \n",
" for value in one_cols[1:]:\n",
" if value in dataItemNo_dataValue:\n",
" append_rows.append(dataItemNo_dataValue[value])\n",
" else:\n",
" append_rows.append(\"\")\n",
" save_xls(append_rows)\n",
" optimize_Model()\n",
" upload_data_to_system(token_push)\n",
" # data_list.append(three_cols)\n",
" # write_xls(data_list)\n",
"\n",
"\n",
"def start_3(date,token,token_push):\n",
" '''预测上传数据'''\n",
" read_xls_data()\n",
"\n",
" # token = get_head_auth()\n",
" # if not token:\n",
" # return\n",
" # token_push = get_head_push_auth()\n",
" # if not token_push:\n",
" # return\n",
"\n",
" datas = get_data_value(token, one_cols[1:],date)\n",
" if not datas:\n",
" return\n",
"\n",
" # data_list = [two_cols, one_cols]\n",
" append_rows = [get_cur_time(date)[1]]\n",
" dataItemNo_dataValue = {}\n",
" for data_value in datas:\n",
" if \"dataValue\" not in data_value:\n",
" print(data_value)\n",
" dataItemNo_dataValue[data_value[\"dataItemNo\"]] = \"\"\n",
" else:\n",
" dataItemNo_dataValue[data_value[\"dataItemNo\"]] = data_value[\"dataValue\"]\n",
" \n",
" for value in one_cols[1:]:\n",
" if value in dataItemNo_dataValue:\n",
" append_rows.append(dataItemNo_dataValue[value])\n",
" else:\n",
" append_rows.append(\"\")\n",
" save_xls(append_rows)\n",
" optimize_Model()\n",
" upload_data_to_system(token_push,date)\n",
" # data_list.append(three_cols)\n",
" # write_xls(data_list)\n",
"\n",
"\n",
"\n",
"def start_1():\n",
" '''更新数据'''\n",
" read_xls_data()\n",
"\n",
" token = get_head_auth()\n",
" if not token:\n",
" return\n",
" \n",
"\n",
" datas = get_data_value(token, one_cols[1:])\n",
" if not datas:\n",
" return\n",
"\n",
" # data_list = [two_cols, one_cols]\n",
" append_rows = [get_cur_time()[1]]\n",
" dataItemNo_dataValue = {}\n",
" for data_value in datas:\n",
" if \"dataValue\" not in data_value:\n",
" print(data_value)\n",
" dataItemNo_dataValue[data_value[\"dataItemNo\"]] = \"\"\n",
" else:\n",
" dataItemNo_dataValue[data_value[\"dataItemNo\"]] = data_value[\"dataValue\"]\n",
" \n",
" for value in one_cols[1:]:\n",
" if value in dataItemNo_dataValue:\n",
" append_rows.append(dataItemNo_dataValue[value])\n",
" else:\n",
" append_rows.append(\"\")\n",
" save_xls_1(append_rows)\n",
"\n",
" \n",
" # data_list.append(three_cols)\n",
" # write_xls(data_list)\n",
"\n",
"\n",
"def start_2(date,token):\n",
" '''更新数据'''\n",
" read_xls_data()\n",
"\n",
" # token = get_head_auth()\n",
" # if not token:\n",
" # return\n",
" \n",
"\n",
" datas = get_data_value(token, one_cols[1:],date)\n",
" if not datas:\n",
" return\n",
"\n",
" # data_list = [two_cols, one_cols]\n",
" append_rows = [get_cur_time(date=date)[1]]\n",
" dataItemNo_dataValue = {}\n",
" for data_value in datas:\n",
" if \"dataValue\" not in data_value:\n",
" print(data_value)\n",
" dataItemNo_dataValue[data_value[\"dataItemNo\"]] = \"\"\n",
" else:\n",
" dataItemNo_dataValue[data_value[\"dataItemNo\"]] = data_value[\"dataValue\"]\n",
" \n",
" for value in one_cols[1:]:\n",
" if value in dataItemNo_dataValue:\n",
" append_rows.append(dataItemNo_dataValue[value])\n",
" else:\n",
" append_rows.append(\"\")\n",
" print('新增数据:',append_rows)\n",
" save_xls_1(append_rows)\n",
"\n",
" \n",
" # data_list.append(three_cols)\n",
" # write_xls(data_list)\n",
" \n",
"def save_xls_1(append_rows):\n",
"\n",
" # 打开xls文件\n",
" workbook = xlrd.open_workbook('沥青数据项.xls')\n",
"\n",
" # 获取所有sheet的个数\n",
" sheet_count = len(workbook.sheet_names())\n",
"\n",
" # 获取所有sheet的名称\n",
" sheet_names = workbook.sheet_names()\n",
"\n",
" new_workbook = xlwt.Workbook()\n",
" for i in range(sheet_count):\n",
" # 获取当前sheet\n",
" sheet = workbook.sheet_by_index(i)\n",
"\n",
" # 获取sheet的行数和列数\n",
" row_count = sheet.nrows - 1\n",
" col_count = sheet.ncols\n",
" # 获取原有数据\n",
" data = []\n",
" for row in range(row_count):\n",
" row_data = []\n",
" for col in range(col_count):\n",
" row_data.append(sheet.cell_value(row, col))\n",
" data.append(row_data)\n",
" # 创建xlwt的Workbook对象\n",
" # 创建sheet\n",
" new_sheet = new_workbook.add_sheet(sheet_names[i])\n",
"\n",
" # 将原有的数据写入新的sheet\n",
" for row in range(row_count):\n",
" for col in range(col_count):\n",
" new_sheet.write(row, col, data[row][col])\n",
"\n",
" if i == 0:\n",
" # 在新的sheet中添加数据\n",
" for col in range(col_count):\n",
" new_sheet.write(row_count, col, append_rows[col])\n",
"\n",
" # 保存新的xls文件\n",
" new_workbook.save(\"沥青数据项.xls\") \n",
"\n",
" \n",
" \n",
" \n",
"def check_data(dataItemNo):\n",
" token = get_head_auth()\n",
" if not token:\n",
" return\n",
"\n",
" datas = get_data_value(token, dataItemNo)\n",
" if not datas:\n",
" return\n",
"\n",
"\n",
"def save_xls(append_rows):\n",
"\n",
" # 打开xls文件\n",
" workbook = xlrd.open_workbook('沥青数据项.xls')\n",
"\n",
" # 获取所有sheet的个数\n",
" sheet_count = len(workbook.sheet_names())\n",
"\n",
" # 获取所有sheet的名称\n",
" sheet_names = workbook.sheet_names()\n",
"\n",
" new_workbook = xlwt.Workbook()\n",
" for i in range(sheet_count):\n",
" # 获取当前sheet\n",
" sheet = workbook.sheet_by_index(i)\n",
"\n",
" # 获取sheet的行数和列数\n",
" row_count = sheet.nrows\n",
" col_count = sheet.ncols\n",
" # 获取原有数据\n",
" data = []\n",
" for row in range(row_count):\n",
" row_data = []\n",
" for col in range(col_count):\n",
" row_data.append(sheet.cell_value(row, col))\n",
" data.append(row_data)\n",
" # 创建xlwt的Workbook对象\n",
" # 创建sheet\n",
" new_sheet = new_workbook.add_sheet(sheet_names[i])\n",
"\n",
" # 将原有的数据写入新的sheet\n",
" for row in range(row_count):\n",
" for col in range(col_count):\n",
" new_sheet.write(row, col, data[row][col])\n",
"\n",
" if i == 0:\n",
" # 在新的sheet中添加数据\n",
" for col in range(col_count):\n",
" new_sheet.write(row_count, col, append_rows[col])\n",
"\n",
" # 保存新的xls文件\n",
" new_workbook.save(\"沥青数据项.xls\")\n",
"\n",
"\n",
"if __name__ == \"__main__\":\n",
" pass\n",
" # 需要单独运行放开\n",
"# start()\n",
"\n",
" # 每天定时12点运行\n",
" # while True:\n",
" # # 获取当前时间\n",
" # current_time = time.strftime(\"%H:%M:%S\", time.localtime())\n",
" # current_time_1 = time.strftime(\"%H:%M:%S\", time.localtime())\n",
"\n",
" # # 判断当前时间是否为执行任务的时间点\n",
" # if current_time == \"12:00:00\":\n",
" # print(\"执行定时任务\")\n",
" # start()\n",
"\n",
" # # 休眠1秒钟, 避免过多占用CPU资源\n",
" # time.sleep(1)\n",
" \n",
" # elif current_time_1 == \"20:00:00\":\n",
" # print(\"更新数据\")\n",
" # start_1()\n",
" # time.sleep(1)\n",
"\n",
"\n",
"# # 检测数据准确性, 需要检测放开\n",
"# # check_data(\"100028098|LISTING_PRICE\")\n",
"# # check_data(\"9137070016544622XB|DAY_Yield\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"20250102\n",
"新增数据: ['2025-01-02', 8057.0, 6784.0, 0.05, 0.0, 3650.0, 0.75, 0.0, 0.0, 3520.0, 7.9, 0.2, 0.2, 3540.0, 1.15, '', 3600.0, 73.36, '', '', 3669.0, 25.1642, '', '', '', '', 229522.1, 6197.58, 3551.9952, '', '', 75999.0902808, 6931.295, '']\n",
"20250103\n",
"新增数据: ['2025-01-03', 8121.0, 6829.0, 0.05, 0.0, 3650.0, 0.7, 0.0, 0.0, 3520.0, 7.9, 0.2, 0.2, 3540.0, 1.15, '', 3600.0, 73.36, 76.03, '', 3678.0, 29.9291, '', '', '', '', 229522.1, 8038.22, 897.5595, '', 50180.0, 73015.8650188, 6693.26, 3602.01]\n",
"20250104\n",
"新增数据: ['2025-01-04', 8156.0, 6856.0, 0.05, 0.0, 3650.0, 0.7, 0.0, 0.0, 3520.0, 7.9, 0.2, 0.2, 3540.0, 1.15, '', 3600.0, 73.36, 76.69, '', '', 32.2931, '', '', '', '', 229522.1, 7900.62, 3667.3859, '', '', 72148.9646528, 6804.94, '']\n",
"20250105\n",
"新增数据: ['2025-01-05', 8192.0, 6856.0, 0.05, 0.0, 3650.0, 0.65, 0.0, 0.0, 3520.0, 7.9, 0.15, 0.2, 3540.0, 1.2, '', 3600.0, 76.69, '', '', '', 30.8747, '', '', '', '', 229522.1, 6417.5, 3657.2132, '', '', 75308.7978357, 6814.96, '']\n",
"20250106\n",
"新增数据: ['2025-01-06', 8226.0, 6856.0, 0.05, 0.0, 3650.0, 0.6, 0.0, 0.0, 3500.0, 7.9, 0.12, 0.2, 3510.0, 1.28, '', 3600.0, 76.69, '', '', 3610.0, 31.8203, '', '', '', '', 229522.1, 5489.2, 3679.4214, '', '', 74486.3079283, 6814.99, '']\n",
"20250107\n",
"新增数据: ['2025-01-07', 8226.0, 6904.0, 0.03, 0.0, 3600.0, 0.57, 0.0, 0.0, 3480.0, 7.9, 0.3, 0.2, 3510.0, 1.26, '', 3550.0, 76.69, 76.26, '', 3603.0, 33.0024, '', '', '', 12.82003192, 229522.1, 5358.32, 3634.1004, '', '', 76588.1941448, 6795.11, '']\n",
"20250108\n",
"新增数据: ['2025-01-08', 8318.0, 6939.0, 0.03, 0.25, 3600.0, 0.57, 0.0, 0.0, 3480.0, 7.9, 0.3, 0.2, 3470.0, 1.16, '', 3530.0, 76.69, 77.25, '', 3615.0, 34.5154, '', '', '', '', 229522.1, 6866.76, 3720.2633, '', '', 75902.1162471, 6756.81, '']\n",
"20250109\n",
"新增数据: ['2025-01-09', 8363.0, 6984.0, 0.03, 0.25, 3600.0, 0.79, 0.0, 0.0, 3480.0, 7.9, 0.2, 0.2, 3470.0, 1.06, '', 3530.0, 76.69, 76.2, '', 3649.0, 31.6785, '', '', '', '', 229522.1, 8478.06, 3571.7593, '', '', 73757.9160547, 6772.99, '']\n",
"20250110\n",
"新增数据: ['2025-01-10', 8467.0, 7031.0, 0.03, 0.25, 3600.0, 1.0, 0.0, 0.0, 3500.0, 7.9, 0.2, 0.2, 3490.0, 1.06, '', 3530.0, 76.69, 77.21, '', 3691.0, 34.5154, '', '', '', '', 229522.1, 6857.04, 3750.5711, '', '', 73779.8843598, 6569.2, '']\n",
"20250111\n",
"新增数据: ['2025-01-11', 9050.0, 7567.0, '', '', '', '', '', '', '', '', '', '', '', '', '', 3700.0, 76.69, 79.59, '', '', 34.5154, '', '', '', '', 229522.1, 8679.56, 3907.1448, '', 140.0, 72204.2847252, 6466.43, 3900.0]\n",
"20250112\n",
"新增数据: ['2025-01-12', 9323.0, 7675.0, 0.15, 0.25, 3600.0, 1.22, 0.0, 0.0, 3630.0, 7.9, 0.1, 0.2, 3650.0, 1.26, '', 3700.0, 79.59, '', '', '', 32.6241, '', '', '', '', 229522.1, 2818.9, 3687.137, '', '', 76933.4067215, 6479.1, '']\n",
"20250113\n",
"新增数据: ['2025-01-13', 9323.0, 7675.0, 0.15, 0.25, 3600.0, 1.32, 0.0, 0.0, 3630.0, 7.9, 0.1, 0.2, 3650.0, 1.36, '', 3800.0, 79.59, '', '', 3807.0, 34.9882, '', '', '', '', 229522.1, 2971.16, 3777.5616, '', 210.0, 79773.9416121, 5746.0, 3800.0]\n",
"20250114\n",
"新增数据: ['2025-01-14', 9047.0, 7454.0, 0.15, 0.25, 3600.0, 1.42, 0.0, 0.0, 3620.0, 7.9, 0.15, 0.2, 3620.0, 1.46, '', 3800.0, 79.59, 80.86, '', 3748.0, 34.9882, '', '', '', 12.95294256, 229522.1, 6394.5, 3781.754, '', '', 79492.6684067, 5645.07, '']\n",
"20250115\n",
"新增数据: ['2025-01-15', 8965.0, 7308.0, 0.15, 0.25, 3600.0, 1.52, 0.0, 0.0, 3620.0, 7.9, 0.15, 0.2, 3610.0, 1.51, '', 3700.0, 79.59, 80.01, '', 3795.0, 36.4066, '', '', '', '', 229522.1, 6365.01, 512.6357, '', '', 79167.4859979, 5680.14, '']\n",
"20250116\n",
"新增数据: ['2025-01-16', 8893.0, 7256.0, 0.15, 0.25, 3600.0, 1.62, 0.0, 0.0, 3620.0, 7.9, 0.2, 0.2, 3610.0, 1.51, '', 3700.0, 79.59, 82.43, '', 3847.0, 36.4066, '', '', '', '', 229522.1, 6617.55, 4137.6414, '', '', 78477.747561, 5353.39, '']\n",
"20250117\n",
"新增数据: ['2025-01-17', 8893.0, 7221.0, 0.45, 0.25, 3600.0, 1.32, 0.0, 0.0, 3600.0, 7.9, 0.2, 0.2, 3610.0, 1.51, '', 3700.0, 79.59, 81.36, '', 3795.0, 36.4066, '', '', '', '', 229522.1, 5119.64, 4008.3951, '', '', 79603.2505459, 4637.39, '']\n",
"20250118\n",
"新增数据: ['2025-01-18', 8848.0, 7175.0, 0.2, 0.0, 3600.0, 1.12, 0.0, 0.0, 3600.0, 7.9, 0.1, 0.2, 3610.0, 1.61, '', 3700.0, 79.59, 80.75, '', '', 32.6241, '', '', '', '', 229522.1, 2455.2, 3946.2519, '', '', 81923.5310154, 4563.86, '']\n",
"20250119\n",
"新增数据: ['2025-01-19', 8766.0, 7093.0, 0.2, 0.0, 3600.0, 1.12, 0.0, 0.0, 3600.0, 7.9, 0.1, 0.2, 3610.0, 1.51, '', 3700.0, 80.75, '', '', '', 31.2057, '', '', '', '', 229522.1, 1344.66, 3972.0295, '', '', 85334.9336625, 4614.82, '']\n",
"20250120\n",
"新增数据: ['2025-01-20', 8665.0, 7013.0, 0.2, 0.0, 3600.0, 1.12, 0.0, 0.0, 3600.0, 7.9, 0.1, 0.2, 3610.0, 1.61, '', 3700.0, 80.75, '', '', 3735.0, 31.2057, '', '', '', '', 229522.1, 1914.2, 4009.7693, '', '', 88766.602507, 4651.54, '']\n",
"20250121\n",
"新增数据: ['2025-01-21', 8665.0, 6957.0, 0.2, 0.0, 3600.0, 0.92, 0.0, 0.0, 3600.0, 7.9, 0.1, 0.2, 3610.0, 1.71, '', 3650.0, 80.75, 76.33, '', 3736.0, 30.7329, '', '', '', 13.92264145, 229522.1, 2044.82, 3942.8707, '', '', 91204.3689572, 4639.75, '']\n",
"20250122\n",
"新增数据: ['2025-01-22', 8665.0, 6957.0, 0.2, 0.0, 3600.0, 0.72, 0.0, 0.0, 3600.0, 7.9, 0.1, 0.2, 3610.0, 1.81, '', 3650.0, 80.75, 79.4, '', 3749.0, 30.7329, '', '', '', '', 229522.1, 3259.22, 3871.276, '', '', 93526.1525172, 4624.36, '']\n",
"20250123\n",
"新增数据: ['2025-01-23', 8630.0, 6913.0, 0.35, 0.0, 3600.0, 0.32, 0.0, 0.0, 3600.0, 7.9, 0.1, 0.2, 3610.0, 1.91, '', 3650.0, 80.75, 76.59, '', 3739.0, 29.7872, '', '', '', '', 229522.1, 2853.49, 3846.3643, '', '', 95756.3698162, 4650.31, '']\n",
"20250124\n",
"新增数据: ['2025-01-24', 8630.0, 6913.0, 0.3, 0.0, 3600.0, 0.0, 0.0, 0.0, 3600.0, 7.9, 0.1, 0.2, 3610.0, 2.01, '', 3650.0, 80.75, 77.43, '', 3750.0, 29.7872, '', '', '', '', 229522.1, 437.72, 3816.5609, '', '', 100306.8638258, 4617.64, 3650.0]\n",
"20250125\n",
"新增数据: ['2025-01-25', 8804.0, 6913.0, 0.0, 0.0, 3600.0, 0.0, 0.0, 0.0, 3600.0, 7.9, 0.03, 0.2, 3610.0, 2.18, '', 3650.0, 80.75, 74.88, '', '', 28.3688, '', '', '', '', 229522.1, '', 3705.6961, '', '', 105207.7537524, 4618.44, '']\n",
"20250126\n",
"新增数据: ['2025-01-26', 8857.0, 6939.0, 0.0, 0.0, 3600.0, 0.0, 0.0, 0.0, 3600.0, 7.9, 0.03, 0.2, 3610.0, 2.35, '', 3650.0, 74.88, '', '', '', 28.3688, '', '', '', '', 229522.1, '', 3694.4462, '', '', 110382.4647653, 4565.66, '']\n",
"20250127\n",
"新增数据: ['2025-01-27', 8857.0, 6939.0, 0.0, 0.0, 3600.0, 0.0, 0.0, 0.0, 3600.0, 7.9, 0.0, 0.2, 3610.0, 2.55, '', 3650.0, 74.88, '', '', 3715.0, 28.3688, '', '', '', '', 229522.1, '', 3626.8247, '', '', 115505.5177864, 4535.31, '']\n",
"20250128\n",
"新增数据: ['2025-01-28', 8857.0, 6939.0, 0.0, 0.0, 3600.0, 0.0, 0.0, 0.0, 3600.0, 7.9, 0.0, 0.2, 3610.0, 2.75, '', 3650.0, 74.88, 73.65, '', '', 27.896, '', '', '', 15.05736163, 229522.1, '', 3584.3414, '', '', 120715.6507229, 4561.74, '']\n",
"20250129\n",
"新增数据: ['2025-01-29', 8857.0, 6939.0, 0.0, 0.0, 3600.0, 0.0, 0.0, 0.0, 3600.0, 7.9, 0.0, 0.2, 3610.0, 2.95, '', 3650.0, 74.88, 76.53, '', '', 26.9504, '', '', '', '', 229522.1, '', 3618.8126, '', '', 125785.4515644, 4593.562, '']\n",
"20250130\n",
"新增数据: ['2025-01-30', 8857.0, 6939.0, '', '', '', '', '', '', '', '', '', '', '', '', '', 3650.0, 74.88, 75.89, '', '', 26.0047, '', '', '', '', 229522.1, '', 3585.9993, '', '', 130781.5671614, 4620.688, '']\n",
"20250131\n",
"新增数据: ['2025-01-31', 8857.0, 6939.0, '', '', '', '', '', '', '', '', '', '', '', '', '', 3650.0, 74.88, 76.15, '', '', 25.5319, 68.6, 225.29, '', '', 229522.1, '', 3587.7887, '', '', 134131.216744, 4025.178, '']\n",
"20250201\n",
"新增数据: ['2025-02-01', 8857.0, 6904.0, '', '', '', '', '', '', '', '', '', '', '', '', '', 3650.0, 74.88, 76.48, '', '', 25.5319, '', '', '', '', 229522.1, '', 3619.0713, '', '', 141390.8400342, 5245.69, '']\n",
"20250202\n",
"新增数据: ['2025-02-02', 8857.0, 6883.0, '', '', '', '', '', '', '', '', '', '', '', '', '', 3650.0, 76.48, '', '', '', 25.5319, '', '', '', '', 229522.1, '', 3624.724, '', '', 146444.993536, 4650.255, '']\n",
"20250203\n",
"新增数据: ['2025-02-03', 8857.0, 6883.0, '', '', '', '', '', '', '', '', '', '', '', '', '', 3650.0, 76.48, '', '', '', 25.5319, '', '', '', '', 229522.1, '', 3698.7029, '', '', 151802.9709409, 4656.745, '']\n",
"20250204\n",
"新增数据: ['2025-02-04', 8905.0, 6948.0, 0.0, 0.0, 3600.0, 0.0, 0.0, 0.0, 3600.0, 7.9, 0.0, 0.2, 3610.0, 4.0, '', 3650.0, 76.48, 75.4, '', '', 26.4775, '', '', '', 17.21173912, 229522.1, 32.7, 3671.4625, '', '', 156872.340789, 4608.25, '']\n",
"20250205\n",
"新增数据: ['2025-02-05', 8956.0, 7022.0, 0.0, 0.25, 3600.0, 0.5, 0.0, 0.0, 3600.0, 7.9, 0.0, 0.2, 3610.0, 4.0, '', 3700.0, 76.48, 76.06, '', 3797.0, 27.4232, '', '', '', '', 229522.1, 857.76, 3622.508, '', '', 160664.5999171, 4644.21, '']\n",
"20250206\n",
"新增数据: ['2025-02-06', 8956.0, 7066.0, 0.0, 0.25, 3600.0, 0.75, 0.0, 0.0, 3600.0, 7.9, 0.0, 0.2, 3610.0, 4.2, '', 3750.0, 76.48, 74.68, '', 3779.0, 27.7541, '', '', '', '', 229522.1, 1217.12, 3513.5033, '', '', 164388.6227375, 4692.07, '']\n",
"20250207\n",
"新增数据: ['2025-02-07', 8921.0, 7031.0, 0.1, 0.25, 3650.0, 0.8, 0.0, 0.0, 3600.0, 7.9, 0.0, 0.2, 3610.0, 4.4, '', 3750.0, 76.48, 74.23, '', 3812.0, 27.7541, '', '', '', '', 229522.1, 942.0, 3631.0462, '', '', 168393.8166163, 4672.16, '']\n",
"20250208\n",
"新增数据: ['2025-02-08', 8866.0, 7004.0, 0.15, 0.25, 3650.0, 0.8, 0.0, 0.0, 3650.0, 7.9, 0.0, 0.2, 3900.0, 4.6, '', 3750.0, 76.48, 74.64, '', '', 27.7541, '', '', '', '', 229522.1, 1089.08, 3664.8865, '', 1000.0, 172610.1660848, 4653.84, 4300.0]\n",
"20250209\n",
"新增数据: ['2025-02-09', 8875.0, 7013.0, '', '', '', '', '', '', '', '', '', '', '', '', '', 3750.0, 74.64, '', '', '', 27.7541, '', '', '', '', 229522.1, 1952.98, 3680.6229, '', '', 175400.4021806, 4608.75, '']\n",
"20250210\n",
"新增数据: ['2025-02-10', 8875.0, 7013.0, 0.15, 0.25, 3650.0, 0.9, 0.0, 0.0, 3650.0, 7.9, 0.0, 0.2, 3900.0, 4.8, '', 3750.0, 74.64, '', '', 3778.0, 28.608, '', '', '', '', 229522.1, 2114.7, 3719.1501, '', 2000.0, 177876.7630674, 4607.05, 4350.0]\n",
"20250211\n",
"新增数据: ['2025-02-11', 8875.0, 7031.0, 0.15, 0.25, 3650.0, 1.0, 0.0, 0.0, 3650.0, 7.9, 0.0, 0.2, 3900.0, 4.8, '', 3800.0, 74.64, 76.01, '', 3776.0, 26.9001, '', '', '', 18.76416033, 229522.1, 1951.18, 3806.0186, '', '', 180448.1199647, 4613.8, '']\n",
"20250212\n"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[2], line 13\u001b[0m\n\u001b[0;32m 11\u001b[0m start_3(start_date,token,token_push)\n\u001b[0;32m 12\u001b[0m time\u001b[38;5;241m.\u001b[39msleep(\u001b[38;5;241m1\u001b[39m)\n\u001b[1;32m---> 13\u001b[0m start_2(start_date,token)\n\u001b[0;32m 14\u001b[0m start_date \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m timedelta(days\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m)\n",
"Cell \u001b[1;32mIn[1], line 600\u001b[0m, in \u001b[0;36mstart_2\u001b[1;34m(date, token)\u001b[0m\n\u001b[0;32m 593\u001b[0m read_xls_data()\n\u001b[0;32m 595\u001b[0m \u001b[38;5;66;03m# token = get_head_auth()\u001b[39;00m\n\u001b[0;32m 596\u001b[0m \u001b[38;5;66;03m# if not token:\u001b[39;00m\n\u001b[0;32m 597\u001b[0m \u001b[38;5;66;03m# return\u001b[39;00m\n\u001b[1;32m--> 600\u001b[0m datas \u001b[38;5;241m=\u001b[39m get_data_value(token, one_cols[\u001b[38;5;241m1\u001b[39m:],date)\n\u001b[0;32m 601\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m datas:\n\u001b[0;32m 602\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m\n",
"Cell \u001b[1;32mIn[1], line 126\u001b[0m, in \u001b[0;36mget_data_value\u001b[1;34m(token, dataItemNoList, date)\u001b[0m\n\u001b[0;32m 117\u001b[0m search_data \u001b[38;5;241m=\u001b[39m {\n\u001b[0;32m 118\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdata\u001b[39m\u001b[38;5;124m\"\u001b[39m: {\n\u001b[0;32m 119\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdate\u001b[39m\u001b[38;5;124m\"\u001b[39m: get_cur_time(date)[\u001b[38;5;241m0\u001b[39m],\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 123\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfuncOperation\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m查询\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 124\u001b[0m }\n\u001b[0;32m 125\u001b[0m headers \u001b[38;5;241m=\u001b[39m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAuthorization\u001b[39m\u001b[38;5;124m\"\u001b[39m: token}\n\u001b[1;32m--> 126\u001b[0m search_res \u001b[38;5;241m=\u001b[39m requests\u001b[38;5;241m.\u001b[39mpost(url\u001b[38;5;241m=\u001b[39msearch_url, headers\u001b[38;5;241m=\u001b[39mheaders, json\u001b[38;5;241m=\u001b[39msearch_data, timeout\u001b[38;5;241m=\u001b[39m(\u001b[38;5;241m3\u001b[39m, \u001b[38;5;241m5\u001b[39m))\n\u001b[0;32m 127\u001b[0m search_value \u001b[38;5;241m=\u001b[39m json\u001b[38;5;241m.\u001b[39mloads(search_res\u001b[38;5;241m.\u001b[39mtext)[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdata\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[0;32m 128\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m search_value:\n",
"File \u001b[1;32md:\\ProgramData\\anaconda3\\Lib\\site-packages\\requests\\api.py:115\u001b[0m, in \u001b[0;36mpost\u001b[1;34m(url, data, json, **kwargs)\u001b[0m\n\u001b[0;32m 103\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mpost\u001b[39m(url, data\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, json\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m 104\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124mr\u001b[39m\u001b[38;5;124;03m\"\"\"Sends a POST request.\u001b[39;00m\n\u001b[0;32m 105\u001b[0m \n\u001b[0;32m 106\u001b[0m \u001b[38;5;124;03m :param url: URL for the new :class:`Request` object.\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 112\u001b[0m \u001b[38;5;124;03m :rtype: requests.Response\u001b[39;00m\n\u001b[0;32m 113\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[1;32m--> 115\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m request(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpost\u001b[39m\u001b[38;5;124m\"\u001b[39m, url, data\u001b[38;5;241m=\u001b[39mdata, json\u001b[38;5;241m=\u001b[39mjson, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
"File \u001b[1;32md:\\ProgramData\\anaconda3\\Lib\\site-packages\\requests\\api.py:59\u001b[0m, in \u001b[0;36mrequest\u001b[1;34m(method, url, **kwargs)\u001b[0m\n\u001b[0;32m 55\u001b[0m \u001b[38;5;66;03m# By using the 'with' statement we are sure the session is closed, thus we\u001b[39;00m\n\u001b[0;32m 56\u001b[0m \u001b[38;5;66;03m# avoid leaving sockets open which can trigger a ResourceWarning in some\u001b[39;00m\n\u001b[0;32m 57\u001b[0m \u001b[38;5;66;03m# cases, and look like a memory leak in others.\u001b[39;00m\n\u001b[0;32m 58\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m sessions\u001b[38;5;241m.\u001b[39mSession() \u001b[38;5;28;01mas\u001b[39;00m session:\n\u001b[1;32m---> 59\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m session\u001b[38;5;241m.\u001b[39mrequest(method\u001b[38;5;241m=\u001b[39mmethod, url\u001b[38;5;241m=\u001b[39murl, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
"File \u001b[1;32md:\\ProgramData\\anaconda3\\Lib\\site-packages\\requests\\sessions.py:589\u001b[0m, in \u001b[0;36mSession.request\u001b[1;34m(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)\u001b[0m\n\u001b[0;32m 584\u001b[0m send_kwargs \u001b[38;5;241m=\u001b[39m {\n\u001b[0;32m 585\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtimeout\u001b[39m\u001b[38;5;124m\"\u001b[39m: timeout,\n\u001b[0;32m 586\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mallow_redirects\u001b[39m\u001b[38;5;124m\"\u001b[39m: allow_redirects,\n\u001b[0;32m 587\u001b[0m }\n\u001b[0;32m 588\u001b[0m send_kwargs\u001b[38;5;241m.\u001b[39mupdate(settings)\n\u001b[1;32m--> 589\u001b[0m resp \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msend(prep, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39msend_kwargs)\n\u001b[0;32m 591\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m resp\n",
"File \u001b[1;32md:\\ProgramData\\anaconda3\\Lib\\site-packages\\requests\\sessions.py:703\u001b[0m, in \u001b[0;36mSession.send\u001b[1;34m(self, request, **kwargs)\u001b[0m\n\u001b[0;32m 700\u001b[0m start \u001b[38;5;241m=\u001b[39m preferred_clock()\n\u001b[0;32m 702\u001b[0m \u001b[38;5;66;03m# Send the request\u001b[39;00m\n\u001b[1;32m--> 703\u001b[0m r \u001b[38;5;241m=\u001b[39m adapter\u001b[38;5;241m.\u001b[39msend(request, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m 705\u001b[0m \u001b[38;5;66;03m# Total elapsed time of the request (approximately)\u001b[39;00m\n\u001b[0;32m 706\u001b[0m elapsed \u001b[38;5;241m=\u001b[39m preferred_clock() \u001b[38;5;241m-\u001b[39m start\n",
"File \u001b[1;32md:\\ProgramData\\anaconda3\\Lib\\site-packages\\requests\\adapters.py:486\u001b[0m, in \u001b[0;36mHTTPAdapter.send\u001b[1;34m(self, request, stream, timeout, verify, cert, proxies)\u001b[0m\n\u001b[0;32m 483\u001b[0m timeout \u001b[38;5;241m=\u001b[39m TimeoutSauce(connect\u001b[38;5;241m=\u001b[39mtimeout, read\u001b[38;5;241m=\u001b[39mtimeout)\n\u001b[0;32m 485\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 486\u001b[0m resp \u001b[38;5;241m=\u001b[39m conn\u001b[38;5;241m.\u001b[39murlopen(\n\u001b[0;32m 487\u001b[0m method\u001b[38;5;241m=\u001b[39mrequest\u001b[38;5;241m.\u001b[39mmethod,\n\u001b[0;32m 488\u001b[0m url\u001b[38;5;241m=\u001b[39murl,\n\u001b[0;32m 489\u001b[0m body\u001b[38;5;241m=\u001b[39mrequest\u001b[38;5;241m.\u001b[39mbody,\n\u001b[0;32m 490\u001b[0m headers\u001b[38;5;241m=\u001b[39mrequest\u001b[38;5;241m.\u001b[39mheaders,\n\u001b[0;32m 491\u001b[0m redirect\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[0;32m 492\u001b[0m assert_same_host\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[0;32m 493\u001b[0m preload_content\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[0;32m 494\u001b[0m decode_content\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[0;32m 495\u001b[0m retries\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmax_retries,\n\u001b[0;32m 496\u001b[0m timeout\u001b[38;5;241m=\u001b[39mtimeout,\n\u001b[0;32m 497\u001b[0m chunked\u001b[38;5;241m=\u001b[39mchunked,\n\u001b[0;32m 498\u001b[0m )\n\u001b[0;32m 500\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (ProtocolError, \u001b[38;5;167;01mOSError\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m err:\n\u001b[0;32m 501\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mConnectionError\u001b[39;00m(err, request\u001b[38;5;241m=\u001b[39mrequest)\n",
"File \u001b[1;32md:\\ProgramData\\anaconda3\\Lib\\site-packages\\urllib3\\connectionpool.py:791\u001b[0m, in \u001b[0;36mHTTPConnectionPool.urlopen\u001b[1;34m(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)\u001b[0m\n\u001b[0;32m 788\u001b[0m response_conn \u001b[38;5;241m=\u001b[39m conn \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m release_conn \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m 790\u001b[0m \u001b[38;5;66;03m# Make the request on the HTTPConnection object\u001b[39;00m\n\u001b[1;32m--> 791\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_make_request(\n\u001b[0;32m 792\u001b[0m conn,\n\u001b[0;32m 793\u001b[0m method,\n\u001b[0;32m 794\u001b[0m url,\n\u001b[0;32m 795\u001b[0m timeout\u001b[38;5;241m=\u001b[39mtimeout_obj,\n\u001b[0;32m 796\u001b[0m body\u001b[38;5;241m=\u001b[39mbody,\n\u001b[0;32m 797\u001b[0m headers\u001b[38;5;241m=\u001b[39mheaders,\n\u001b[0;32m 798\u001b[0m chunked\u001b[38;5;241m=\u001b[39mchunked,\n\u001b[0;32m 799\u001b[0m retries\u001b[38;5;241m=\u001b[39mretries,\n\u001b[0;32m 800\u001b[0m response_conn\u001b[38;5;241m=\u001b[39mresponse_conn,\n\u001b[0;32m 801\u001b[0m preload_content\u001b[38;5;241m=\u001b[39mpreload_content,\n\u001b[0;32m 802\u001b[0m decode_content\u001b[38;5;241m=\u001b[39mdecode_content,\n\u001b[0;32m 803\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mresponse_kw,\n\u001b[0;32m 804\u001b[0m )\n\u001b[0;32m 806\u001b[0m \u001b[38;5;66;03m# Everything went great!\u001b[39;00m\n\u001b[0;32m 807\u001b[0m clean_exit \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n",
"File \u001b[1;32md:\\ProgramData\\anaconda3\\Lib\\site-packages\\urllib3\\connectionpool.py:537\u001b[0m, in \u001b[0;36mHTTPConnectionPool._make_request\u001b[1;34m(self, conn, method, url, body, headers, retries, timeout, chunked, response_conn, preload_content, decode_content, enforce_content_length)\u001b[0m\n\u001b[0;32m 535\u001b[0m \u001b[38;5;66;03m# Receive the response from the server\u001b[39;00m\n\u001b[0;32m 536\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 537\u001b[0m response \u001b[38;5;241m=\u001b[39m conn\u001b[38;5;241m.\u001b[39mgetresponse()\n\u001b[0;32m 538\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (BaseSSLError, \u001b[38;5;167;01mOSError\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m 539\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_raise_timeout(err\u001b[38;5;241m=\u001b[39me, url\u001b[38;5;241m=\u001b[39murl, timeout_value\u001b[38;5;241m=\u001b[39mread_timeout)\n",
"File \u001b[1;32md:\\ProgramData\\anaconda3\\Lib\\site-packages\\urllib3\\connection.py:461\u001b[0m, in \u001b[0;36mHTTPConnection.getresponse\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 458\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mresponse\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m HTTPResponse\n\u001b[0;32m 460\u001b[0m \u001b[38;5;66;03m# Get the response from http.client.HTTPConnection\u001b[39;00m\n\u001b[1;32m--> 461\u001b[0m httplib_response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39mgetresponse()\n\u001b[0;32m 463\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m 464\u001b[0m assert_header_parsing(httplib_response\u001b[38;5;241m.\u001b[39mmsg)\n",
"File \u001b[1;32md:\\ProgramData\\anaconda3\\Lib\\http\\client.py:1386\u001b[0m, in \u001b[0;36mHTTPConnection.getresponse\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 1384\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m 1385\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m-> 1386\u001b[0m response\u001b[38;5;241m.\u001b[39mbegin()\n\u001b[0;32m 1387\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mConnectionError\u001b[39;00m:\n\u001b[0;32m 1388\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclose()\n",
"File \u001b[1;32md:\\ProgramData\\anaconda3\\Lib\\http\\client.py:325\u001b[0m, in \u001b[0;36mHTTPResponse.begin\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 323\u001b[0m \u001b[38;5;66;03m# read until we get a non-100 response\u001b[39;00m\n\u001b[0;32m 324\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[1;32m--> 325\u001b[0m version, status, reason \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_read_status()\n\u001b[0;32m 326\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m status \u001b[38;5;241m!=\u001b[39m CONTINUE:\n\u001b[0;32m 327\u001b[0m \u001b[38;5;28;01mbreak\u001b[39;00m\n",
"File \u001b[1;32md:\\ProgramData\\anaconda3\\Lib\\http\\client.py:286\u001b[0m, in \u001b[0;36mHTTPResponse._read_status\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 285\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_read_status\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m--> 286\u001b[0m line \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mstr\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfp\u001b[38;5;241m.\u001b[39mreadline(_MAXLINE \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m), \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124miso-8859-1\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 287\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(line) \u001b[38;5;241m>\u001b[39m _MAXLINE:\n\u001b[0;32m 288\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m LineTooLong(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstatus line\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
"File \u001b[1;32md:\\ProgramData\\anaconda3\\Lib\\socket.py:706\u001b[0m, in \u001b[0;36mSocketIO.readinto\u001b[1;34m(self, b)\u001b[0m\n\u001b[0;32m 704\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[0;32m 705\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 706\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_sock\u001b[38;5;241m.\u001b[39mrecv_into(b)\n\u001b[0;32m 707\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m timeout:\n\u001b[0;32m 708\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_timeout_occurred \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n",
"\u001b[1;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"from datetime import datetime, timedelta\n",
"\n",
"start_date = datetime(2025, 2, 1)\n",
"end_date = datetime(2025, 3, 1)\n",
"token = get_head_auth()\n",
"\n",
"token_push = get_head_push_auth()\n",
"\n",
"while start_date < end_date:\n",
" print(start_date.strftime('%Y%m%d'))\n",
" start_3(start_date,token,token_push)\n",
" time.sleep(1)\n",
" start_2(start_date,token)\n",
" start_date += timedelta(days=1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "base",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 4
}