aup API changes; asphalt qualitative and quantitative data changes

This commit is contained in:
workpc 2024-12-23 17:12:13 +08:00
parent ea3b333936
commit 81e255747c
7 changed files with 1221 additions and 289 deletions

View File

@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -211,7 +211,6 @@
" \n",
" \n",
" \n",
" \n",
"def start_1():\n",
" workbook = xlrd.open_workbook(read_file_path_name)\n",
"\n",
@@ -472,22 +471,22 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"20241209\n"
"20241217\n"
]
}
],
"source": [
"from datetime import datetime, timedelta\n",
"\n",
"start_date = datetime(2024, 12, 9)\n",
"end_date = datetime(2024, 12, 10)\n",
"start_date = datetime(2024, 12, 17)\n",
"end_date = datetime(2024, 12, 18)\n",
"\n",
"while start_date < end_date:\n",
" print(start_date.strftime('%Y%m%d'))\n",

View File

@@ -795,14 +795,14 @@
"name": "stdout",
"output_type": "stream",
"text": [
"20241212\n"
"20241217\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\EDY\\AppData\\Local\\Temp\\ipykernel_19760\\2239815117.py:299: UserWarning:\n",
"C:\\Users\\EDY\\AppData\\Local\\Temp\\ipykernel_4500\\2239815117.py:299: UserWarning:\n",
"\n",
"The argument 'infer_datetime_format' is deprecated and will be removed in a future version. A strict version of it is now the default, see https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. You can safely remove this argument.\n",
"\n"
@@ -812,7 +812,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Using matplotlib backend: <object object at 0x0000015D1F0FF090>\n",
"Using matplotlib backend: <object object at 0x0000017F4168F090>\n",
"%pylab is deprecated, use %matplotlib inline and import the required libraries.\n",
"Populating the interactive namespace from numpy and matplotlib\n",
"Fitting 3 folds for each of 180 candidates, totalling 540 fits\n"
@@ -824,7 +824,7 @@
"text": [
"d:\\ProgramData\\anaconda3\\Lib\\site-packages\\IPython\\core\\magics\\pylab.py:162: UserWarning:\n",
"\n",
"pylab import has clobbered these variables: ['datetime', 'plot', 'random', '__version__']\n",
"pylab import has clobbered these variables: ['__version__', 'plot', 'random', 'datetime']\n",
"`%matplotlib` prevents importing * from pylab and numpy\n",
"\n"
]
@@ -844,108 +844,33 @@
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\EDY\\AppData\\Local\\Temp\\ipykernel_19760\\2239815117.py:239: UserWarning:\n",
"C:\\Users\\EDY\\AppData\\Local\\Temp\\ipykernel_4500\\2239815117.py:239: UserWarning:\n",
"\n",
"The argument 'infer_datetime_format' is deprecated and will be removed in a future version. A strict version of it is now the default, see https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. You can safely remove this argument.\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"日期\n",
"2024-12-12 3552.045898\n",
"Name: 日度预测价格, dtype: float32\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\EDY\\AppData\\Local\\Temp\\ipykernel_19760\\2239815117.py:273: FutureWarning:\n",
"\n",
"C:\\Users\\EDY\\AppData\\Local\\Temp\\ipykernel_4500\\2239815117.py:273: FutureWarning:\n",
"\n",
"Series.__getitem__ treating keys as positions is deprecated. In a future version, integer keys will always be treated as labels (consistent with DataFrame behavior). To access a value by position, use `ser.iloc[pos]`\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\"confirmFlg\":false,\"status\":true}\n",
"新增数据: ['2024-12-12', 7957.0, 7066.0, 0.1, 0.0, 3650.0, 0.9, 0.0, 0.0, 3540.0, 7.9, 0.2, 0.2, 3500.0, 0.6, '', 3500.0, 71.05, 73.53, '', 3510.0, 29.5977, '', '', '', '', 229522.1, 11025.3, 3426.4568, '', '', 42263.29278, 6647.44, 3970.0]\n",
"20241213\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\EDY\\AppData\\Local\\Temp\\ipykernel_19760\\2239815117.py:299: UserWarning:\n",
"\n",
"The argument 'infer_datetime_format' is deprecated and will be removed in a future version. A strict version of it is now the default, see https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. You can safely remove this argument.\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Using matplotlib backend: QtAgg\n",
"%pylab is deprecated, use %matplotlib inline and import the required libraries.\n",
"Populating the interactive namespace from numpy and matplotlib\n",
"Fitting 3 folds for each of 180 candidates, totalling 540 fits\n",
"Best score: 0.997\n",
"Best parameters set:\n",
"\tlearning_rate: 0.1\n",
"\tmax_depth: 8\n",
"\tn_estimators: 100\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\EDY\\AppData\\Local\\Temp\\ipykernel_19760\\2239815117.py:239: UserWarning:\n",
"\n",
"The argument 'infer_datetime_format' is deprecated and will be removed in a future version. A strict version of it is now the default, see https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. You can safely remove this argument.\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"日期\n",
"2024-12-13 3504.912354\n",
"Name: 日度预测价格, dtype: float32\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\EDY\\AppData\\Local\\Temp\\ipykernel_19760\\2239815117.py:273: FutureWarning:\n",
"\n",
"Series.__getitem__ treating keys as positions is deprecated. In a future version, integer keys will always be treated as labels (consistent with DataFrame behavior). To access a value by position, use `ser.iloc[pos]`\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"2024-12-17 3501.835693\n",
"Name: 日度预测价格, dtype: float32\n",
"{\"confirmFlg\":false,\"status\":true}\n",
"新增数据: ['2024-12-13', 7957.0, 7066.0, 0.1, 0.0, 3650.0, 0.55, 0.0, 0.0, 3540.0, 7.9, 0.2, 0.2, 3600.0, 0.6, '', 3500.0, 71.05, 73.41, '', 3512.0, 29.5977, '', '', '', '', 229522.1, 8091.12, 3417.4442, '', '', 41436.2654696, 6913.5165, '']\n"
"新增数据: ['2024-12-17', 7957.0, 6984.0, 0.0, 0.25, 3650.0, 0.55, 0.0, 0.0, 3520.0, 7.9, 0.2, 0.2, 3500.0, 0.75, '', 3500.0, 74.35, 72.69, '', 3522.0, 28.8998, '', '', '', 13.58990112, 229522.1, 7722.02, 3511.1707, '', '', 40514.8218813, 7088.86, '']\n"
]
}
],
"source": [
"from datetime import datetime, timedelta\n",
"\n",
"start_date = datetime(2024, 12, 12)\n",
"end_date = datetime(2024, 12, 14)\n",
"start_date = datetime(2024, 12, 17)\n",
"end_date = datetime(2024, 12, 18)\n",
"\n",
"while start_date < end_date:\n",
" print(start_date.strftime('%Y%m%d'))\n",

View File

@@ -34,14 +34,15 @@ auth = HttpNtlmAuth(f'{graphql_username}', f'{graphql_password}')
# Request header settings
headers = {'content-type': 'application/json;charset=UTF-8'}
def insert_api_log(request_time, request_url, request_method, request_params, response_content, response_time):
'''
Write a record to the request log table v_tbl_aup_api_log
'''
try:
# Establish the database connection
cnx = mysql.connector.connect(**config)
global cnx
if cnx is None:
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
# First query the number of existing records in the table, used to generate the new record's ID
# count_query = "SELECT max(ID) FROM v_tbl_aup_api_log"
@@ -70,12 +71,14 @@ def insert_api_log(request_time, request_url, request_method, request_params, re
print(f"Error: {err}")
finally:
# Close the cursor and connection
if cursor:
cursor.close()
if cnx:
cnx.close()
try:
if cursor:
cursor.close()
except UnboundLocalError:
pass
cnx = None
tags_metadata = [
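The change above switches insert_api_log from opening a fresh connection on every call to reusing a module-level handle and dropping it in the finally block. A compact, self-contained sketch of that connection-handling pattern, assuming a module-level cnx = None and a config dict defined earlier in the file (those two names are taken from the diff; everything else is illustrative):

import mysql.connector

config = {"host": "localhost", "user": "user", "password": "pass", "database": "db"}  # placeholder settings
cnx = None  # module-level handle, reconnected lazily

def write_log_row(sql, params):
    global cnx
    cursor = None
    try:
        if cnx is None:
            cnx = mysql.connector.connect(**config)   # lazy (re)connect, as in insert_api_log
        cursor = cnx.cursor()
        cursor.execute(sql, params)
        cnx.commit()
    except mysql.connector.Error as err:
        print(f"Error: {err}")
    finally:
        if cursor:
            cursor.close()
        cnx = None  # mirrors the diff: the handle is dropped so the next call reconnects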
@@ -369,8 +372,7 @@ async def generate_graphql_query(
full_path = str(request.url.path)
session = requests.Session()
try:
response = await session.post(url=url, headers=headers, json=payload_json, auth=auth, verify=False, timeout=300)
print(type(response))
response = session.post(url=url, headers=headers, json=payload_json, auth=auth, verify=False, timeout=300)
except requests.exceptions.ConnectTimeout as e:
# Build a response data dict describing the error case
error_response_data = {
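Context for the await removal in the hunk above: requests.Session.post is synchronous and returns a requests.Response, which is not awaitable, so awaiting it raises a TypeError at runtime; the commit therefore calls it directly. If a non-blocking call were wanted inside the async endpoint, one option (not part of this commit) is to push the blocking call onto a worker thread, sketched here with an illustrative helper:

import asyncio
import requests

async def post_in_thread(url, **kwargs):
    # requests stays synchronous; asyncio.to_thread runs it off the event loop (Python 3.9+)
    session = requests.Session()
    return await asyncio.to_thread(session.post, url, **kwargs)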
@@ -408,12 +410,13 @@ async def generate_graphql_query(
if response.status_code!= 200:
raise HTTPException(status_code=response.status_code, detail=response.text)
print(response.json())
return response.json()
except TypeError as e:
return {"error": str(e)}
@app.get("/get_cases",tags=['get_cases'])
async def post_cases_query_async(request: Request):
async def get_cases_query_async(request: Request):
payload_json2 = {
"query": templates.case_qurey
}
@@ -421,7 +424,12 @@ async def post_cases_query_async(request: Request):
request_time = datetime.now()
session = requests.Session()
try:
response = await session.post(url=url, headers=headers, json=payload_json2, auth=auth, verify=False)
response = session.post(url=url, headers=headers, json=payload_json2, auth=auth, verify=False)
# Parse the JSON string into a Python dict
res = response.json()
# # Extract the name list
# name_list = [item["name"] for item in res["data"]["cases"]["items"]]
# res['name_list'] = name_list
except requests.exceptions.ConnectTimeout as e:
# 构造符合错误情况的响应数据字典
error_response_data = {
@@ -429,8 +437,8 @@ async def post_cases_query_async(request: Request):
"data": {},
"status_code": 503 # 使用合适的状态码如503表示服务暂时不可用可根据具体错误类型调整
}
response = error_response_data
raise HTTPException(status_code=503, detail=response) # Raise an HTTPException with the appropriate status code
res = error_response_data
raise HTTPException(status_code=503, detail=res) # Raise an HTTPException with the appropriate status code
except requests.exceptions.RequestException as e:
# Catch other request-related exceptions and handle them uniformly
error_response_data = {
@@ -439,14 +447,10 @@ async def post_cases_query_async(request: Request):
"status_code": 500
}
response = error_response_data
raise HTTPException(status_code=500, detail=response)
res = error_response_data
raise HTTPException(status_code=500, detail=res)
finally:
response_time = datetime.now()
try:
res = response.json()
except (UnboundLocalError,AttributeError):
res = response
# Call the log-insertion function to record the request details in the database (assumes insert_api_log is defined and accessible)
insert_api_log(
request_time,
@@ -460,16 +464,11 @@ async def post_cases_query_async(request: Request):
if response.status_code!= 200:
raise HTTPException(status_code=response.status_code, detail=response.text)
# Parse the JSON string into a Python dict
data_dict = response.json()
# Extract the name list
name_list = [item["name"] for item in data_dict["data"]["cases"]["items"]]
data_dict['name_lsit'] = name_list
return json.dumps(data_dict)
return res
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="127.0.0.1", port=8003)
uvicorn.run(app, host="0.0.0.0", port=8003)
# query = """

File diff suppressed because one or more lines are too long