import requests
import json
import xlrd
import xlwt
from datetime import datetime, timedelta
import time
import pandas as pd

pd.set_option('display.max_columns', None)

import numpy as np

# Variable definitions
login_url = "http://10.200.32.39/jingbo-api/api/server/login"
login_push_url = "http://10.200.32.39/jingbo-api/api/server/login"
# query_data_list_item_nos_url
search_url = "http://10.200.32.39/jingbo-api/api/warehouse/dwDataItem/queryByItemNos"  # jingbo-dev/api/warehouse/dwDataItem/queryDataListItemNos
upload_url = "http://10.200.32.39/jingbo-api/api/dw/dataValue/pushDataValueList"
queryDataListItemNos_url = "http://10.200.32.39/jingbo-api//api/warehouse/dwDataItem/queryDataListItemNos"


query_data_list_item_nos_data = {
    "funcModule": "数据项",
    "funcOperation": "查询",
    "data": {
        "dateStart": "20200101",
        "dateEnd": "20241231",
        "dataItemNoList": ["Brentzdj", "Brentzgj"]  # data item codes for the Brent low and high prices
    }
}


login_data = {
    "data": {
        "account": "api_dev",
        "password": "ZTEwYWRjMzk0OWJhNTlhYmJlNTZlMDU3ZjIwZjg4M2U=",
        "tenantHashCode": "8a4577dbd919675758d57999a1e891fe",
        "terminal": "API"
    },
    "funcModule": "API",
    "funcOperation": "获取token"
}

login_push_data = {
    "data": {
        "account": "api_dev",
        "password": "ZTEwYWRjMzk0OWJhNTlhYmJlNTZlMDU3ZjIwZjg4M2U=",
        "tenantHashCode": "8a4577dbd919675758d57999a1e891fe",
        "terminal": "API"
    },
    "funcModule": "API",
    "funcOperation": "获取token"
}


read_file_path_name = "定性模型数据项12-11.xlsx"
one_cols = []
two_cols = []


def get_head_auth():
    login_res = requests.post(url=login_url, json=login_data, timeout=(3, 5))
    text = json.loads(login_res.text)
    if text["status"]:
        token = text["data"]["accessToken"]
        return token
    else:
        print("获取认证失败")
        return None


def get_head_push_auth():
    login_res = requests.post(url=login_push_url, json=login_push_data, timeout=(3, 5))
    text = json.loads(login_res.text)
    if text["status"]:
        token = text["data"]["accessToken"]
        return token
    else:
        print("获取认证失败")
        return None

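# Login response shape assumed by the two helpers above (inferred from the field
# accesses in this script, not an authoritative API spec):
#   {"status": true, "data": {"accessToken": "<token>"}, ...}
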
def update_e_value(file_path, column_index, threshold):
    """
    Data correction requirement (2025-01-08):
    if today's spot cost differs from yesterday's cost by more than +/-1000,
    use yesterday's cost instead.

    Update the value in the given column of the Excel file: if the new value
    differs from the previous day's value by more than the threshold, replace
    it with the previous day's value.

    :param file_path: path of the Excel file
    :param column_index: index of the column to update
    :param threshold: change threshold
    """
    # Read the Excel file
    # try:
    #     df = pd.read_excel(file_path, engine='openpyxl')
    # except:
    #     df = pd.read_excel(file_path, engine='xlrd')
    df = pd.read_excel(file_path)

    # Normalise all numeric values to float
    df = df.applymap(lambda x: float(x) if isinstance(x, (int, float)) else x)

    # Forward-fill missing values
    df = df.ffill()

    # Take the rows for the day before yesterday and yesterday
    df1 = df[-3:-1]
    print(df1)
    # Previous day's value of the target column
    previous_value = df1.iloc[0, column_index]
    print('前一天的', previous_value, type(previous_value))
    # Current value of the target column
    current_value = df1.iloc[1, column_index]
    print('现在的', current_value, type(current_value))
    # If the change exceeds the threshold, fall back to the previous day's value
    if abs(current_value - previous_value) > threshold:
        df.iloc[-2, column_index] = previous_value
        print('修改了')

    # Write the modified data back to the Excel file
    df.to_excel(file_path, index=False, engine='openpyxl')

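# Usage example, matching how it is called later in this script:
#   update_e_value('定性模型数据项12-11.xlsx', 8, 1000)
# Column index 8 is assumed here to be the spot-cost column referred to in the
# docstring, with a +/-1000 change threshold.
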
def getLogToken():
    login_res = requests.post(url=login_url, json=login_data, timeout=(3, 5))
    text = json.loads(login_res.text)
    if text["status"]:
        token = text["data"]["accessToken"]
    else:
        print("获取认证失败")
        token = None
    return token

def updateExcelDatabak(date='', token=None):
    workbook = xlrd.open_workbook(read_file_path_name)

    # Select the first sheet
    sheet = workbook.sheet_by_index(0)

    # The second row holds the data item codes
    row_data = sheet.row_values(1)
    one_cols = row_data

    cur_time, cur_time2 = getNow(date)
    search_data = {
        "data": {
            "date": cur_time,
            "dataItemNoList": one_cols[1:]
        },
        "funcModule": "数据项",
        "funcOperation": "查询"
    }
    headers = {"Authorization": token}
    search_res = requests.post(url=search_url, headers=headers, json=search_data, timeout=(3, 5))
    search_value = json.loads(search_res.text)["data"]
    if search_value:
        datas = search_value
    else:
        datas = []  # guard: keep the loop below from failing on an empty response

    append_rows = [cur_time2]
    dataItemNo_dataValue = {}
    for data_value in datas:
        if "dataValue" not in data_value:
            print(data_value)
            dataItemNo_dataValue[data_value["dataItemNo"]] = ""
        else:
            dataItemNo_dataValue[data_value["dataItemNo"]] = data_value["dataValue"]
    for value in one_cols[1:]:
        if value in dataItemNo_dataValue:
            append_rows.append(dataItemNo_dataValue[value])
        else:
            append_rows.append("")

    workbook = xlrd.open_workbook('定性模型数据项12-11.xlsx')

    # Number of sheets
    sheet_count = len(workbook.sheet_names())

    # Names of all sheets
    sheet_names = workbook.sheet_names()

    new_workbook = xlwt.Workbook()
    for i in range(sheet_count):
        # Current sheet
        sheet = workbook.sheet_by_index(i)

        # Row and column counts of the sheet
        row_count = sheet.nrows
        col_count = sheet.ncols
        # Existing data
        data = []
        for row in range(row_count):
            row_data = []
            for col in range(col_count):
                row_data.append(sheet.cell_value(row, col))
            data.append(row_data)

        # Create the corresponding sheet in the new xlwt workbook
        new_sheet = new_workbook.add_sheet(sheet_names[i])

        # Copy the existing data into the new sheet
        for row in range(row_count):
            for col in range(col_count):
                new_sheet.write(row, col, data[row][col])

        if i == 0:
            # Append the newly fetched row to the first sheet
            for col in range(col_count):
                new_sheet.write(row_count, col, append_rows[col])

    # Save the new xls file
    new_workbook.save("定性模型数据项12-11.xlsx")


def updateYesterdayExcelData(date='', token=None):
    # Read the Excel file with pandas
    df = pd.read_excel(read_file_path_name, engine='openpyxl')

    # Take the second row (first data row) as the data item codes
    one_cols = df.iloc[0, :].tolist()

    # The day before the given date (or before today if no date is given)
    if date == '':
        previous_date = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
    else:
        # Parse the date string first
        previous_date = (datetime.strptime(date, "%Y-%m-%d") - timedelta(days=1)).strftime('%Y-%m-%d')

    cur_time, cur_time2 = getNow(previous_date)
    search_data = {
        "data": {
            "date": cur_time,
            "dataItemNoList": one_cols[1:]
        },
        "funcModule": "数据项",
        "funcOperation": "查询"
    }
    headers = {"Authorization": token}
    search_res = requests.post(url=search_url, headers=headers, json=search_data, timeout=(3, 5))
    search_value = json.loads(search_res.text)["data"]
    if search_value:
        datas = search_value
    else:
        datas = []  # guard against an empty response

    append_rows = [cur_time2]
    dataItemNo_dataValue = {}
    for data_value in datas:
        if "dataValue" not in data_value:
            print(data_value)
            dataItemNo_dataValue[data_value["dataItemNo"]] = ""
        else:
            dataItemNo_dataValue[data_value["dataItemNo"]] = data_value["dataValue"]
    for value in one_cols[1:]:
        if value in dataItemNo_dataValue:
            append_rows.append(dataItemNo_dataValue[value])
        else:
            append_rows.append("")

    print('更新数据前')
    print(df.tail(1))
    # Check whether the date already exists in the data
    if previous_date not in df['日期'].values:
        # Append the new row to the DataFrame
        new_row = pd.DataFrame([append_rows], columns=df.columns.tolist())
        df = pd.concat([df, new_row], ignore_index=True)
    else:
        # Update the existing row
        print('日期存在,即将更新')
        print('新数据', append_rows[1:])
        df.loc[df['日期'] == previous_date, df.columns.tolist()[1:]] = append_rows[1:]

    print('更新数据后')
    print(df.tail(1))
    # Save the Excel file with pandas
    df.to_excel("定性模型数据项12-11.xlsx", index=False, engine='openpyxl')


def updateExcelData(date='', token=None):
    # Read the Excel file with pandas
    df = pd.read_excel(read_file_path_name, engine='openpyxl')

    # Take the first row as column names
    # one_cols = df.columns.tolist()

    # Take the second row (first data row) as the data item codes
    one_cols = df.iloc[0, :].tolist()

    cur_time, cur_time2 = getNow(date)
    search_data = {
        "data": {
            "date": cur_time,
            "dataItemNoList": one_cols[1:]
        },
        "funcModule": "数据项",
        "funcOperation": "查询"
    }
    headers = {"Authorization": token}
    search_res = requests.post(url=search_url, headers=headers, json=search_data, timeout=(3, 5))
    search_value = json.loads(search_res.text)["data"]
    if search_value:
        datas = search_value
    else:
        datas = []  # guard against an empty response

    append_rows = [cur_time2]
    dataItemNo_dataValue = {}
    for data_value in datas:
        if "dataValue" not in data_value:
            print(data_value)
            dataItemNo_dataValue[data_value["dataItemNo"]] = ""
        else:
            dataItemNo_dataValue[data_value["dataItemNo"]] = data_value["dataValue"]
    for value in one_cols[1:]:
        if value in dataItemNo_dataValue:
            append_rows.append(dataItemNo_dataValue[value])
        else:
            append_rows.append("")

    # Append the new row to the DataFrame
    new_row = pd.DataFrame([append_rows], columns=df.columns.tolist())
    df = pd.concat([df, new_row], ignore_index=True)
    # df = df.append(pd.Series(append_rows), ignore_index=True)

    # Save the Excel file with pandas
    df.to_excel("定性模型数据项12-11.xlsx", index=False, engine='openpyxl')


def qualitativeModel():
    df = pd.read_excel('定性模型数据项12-11.xlsx')

    df = df.ffill()
    df1 = df[-3:-1].reset_index()
    print(df1)
    '''
    # if df1.loc[1,'70号沥青开工率'] > 0.3:
    Changed on 2025-01-08:
    a post-mortem review found that from July 2024 the operating-rate data
    changed from 0.28 to 28, so the rule below is used instead.
    '''
    if df1.loc[1, '70号沥青开工率'] / 100 > 0.3:
        a = -(df1.loc[1, '70号沥青开工率'] / 100 - 0.2) * 5 / 0.1
    else:
        a = 0
    b = df1.loc[1, '资金因素']

    print('昨日计划提货偏差改之前', df1.loc[1, '昨日计划提货偏差'])
    # 昨日计划提货偏差 (yesterday's planned-pickup deviation) = 京博产量 - 计划产量
    df1.loc[1, '昨日计划提货偏差'] = df1.loc[1, '京博产量'] - df1.loc[1, '计划产量']
    print('昨日计划提货偏差改之后', df1.loc[1, '昨日计划提货偏差'])

    if df1.loc[1, '昨日计划提货偏差'] > 0:
        c = df1.loc[1, '昨日计划提货偏差'] * 10 / 2000
    else:
        c = df1.loc[1, '昨日计划提货偏差'] * 10 / 3000

    # 生产情况 (production factor) = (京博产量 - 计划产量) / 500 * 5
    d = (df1.loc[1, '京博产量'] - df1.loc[1, '计划产量']) / 500 * 5

    if df1.loc[1, '基质沥青库存'] / 265007 > 0.8:
        e = (df1.loc[1, '基质沥青库存'] - df1.loc[0, '基质沥青库存']) * 10 / -5000
    else:
        e = 0
    # f = df1.loc[1,'下游客户价格预期']
    f = 1  # changed 2025-01-23: the price expectation is always taken as 1
    if abs(df1.loc[1, '即期成本'] - df1.loc[0, '即期成本']) >= 100:
        g = (df1.loc[1, '即期成本'] - df1.loc[0, '即期成本']) * 50 / 100
    else:
        g = 0
    h = df1.loc[1, '订单结构']
    x = round(0.08 * a + 0 * b + 0.15 * c + 0.08 * d + 0.03 * e + 0.08 * f + 0.4 * g + 0.18 * h
              + df1.loc[0, '京博指导价'], 2)
    return x

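# Reading of the scoring above (derived directly from qualitativeModel):
#   a: 70号沥青开工率 factor      b: 资金因素                  c: 昨日计划提货偏差 factor
#   d: production factor          e: 基质沥青库存 factor
#   f: price expectation (fixed at 1)   g: 即期成本 change factor   h: 订单结构
# Forecast:
#   x = 0.08*a + 0*b + 0.15*c + 0.08*d + 0.03*e + 0.08*f + 0.4*g + 0.18*h
#       + previous day's 京博指导价, rounded to two decimals.
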
def getNow(date='', offset=0):
    """Return (YYYYMMDD, YYYY-MM-DD) for the given date string, or for today minus `offset` days."""
    if date == '':
        now = datetime.now() - timedelta(days=offset)
    else:
        try:
            date = datetime.strptime(date, "%Y-%m-%d")
        except ValueError:
            date = datetime.strptime(date, "%Y%m%d")
        now = date

    year = now.year
    month = now.month
    day = now.day

    if month < 10:
        month = "0" + str(month)
    if day < 10:
        day = "0" + str(day)
    cur_time = str(year) + str(month) + str(day)
    cur_time2 = str(year) + "-" + str(month) + "-" + str(day)
    return cur_time, cur_time2

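# Example: getNow('2025-01-08') and getNow('20250108') both return
# ('20250108', '2025-01-08'); getNow() uses today's date.
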
def pushData(cur_time, x, token_push):
    data1 = {
        "funcModule": "数据表信息列表",
        "funcOperation": "新增",
        "data": [
            {
                "dataItemNo": "C01100036|Forecast_Price|DX|ACN",
                "dataDate": cur_time,
                "dataStatus": "add",
                "dataValue": x
            }
        ]
    }
    headers1 = {"Authorization": token_push}
    res = requests.post(url=upload_url, headers=headers1, json=data1, timeout=(3, 5))


def start_2(date='', token=None):
    workbook = xlrd.open_workbook(read_file_path_name)

    # Select the first sheet
    sheet = workbook.sheet_by_index(0)

    # Row count
    num_rows = sheet.nrows

    # The second row holds the data item codes
    row_data = sheet.row_values(1)
    one_cols = row_data

    cur_time, cur_time2 = getNow(date)

    search_data = {
        "data": {
            "date": cur_time,
            "dataItemNoList": one_cols[1:]
        },
        "funcModule": "数据项",
        "funcOperation": "查询"
    }
    headers = {"Authorization": token}
    search_res = requests.post(url=search_url, headers=headers, json=search_data, timeout=(3, 5))
    search_value = json.loads(search_res.text)["data"]
    if search_value:
        datas = search_value
    else:
        datas = []  # guard against an empty response

    append_rows = [cur_time2]
    dataItemNo_dataValue = {}
    for data_value in datas:
        if "dataValue" not in data_value:
            print(data_value)
            dataItemNo_dataValue[data_value["dataItemNo"]] = ""
        else:
            dataItemNo_dataValue[data_value["dataItemNo"]] = data_value["dataValue"]
    for value in one_cols[1:]:
        if value in dataItemNo_dataValue:
            append_rows.append(dataItemNo_dataValue[value])
        else:
            append_rows.append("")

    workbook = xlrd.open_workbook('定性模型数据项12-11.xlsx')

    # Number of sheets
    sheet_count = len(workbook.sheet_names())

    # Names of all sheets
    sheet_names = workbook.sheet_names()

    new_workbook = xlwt.Workbook()
    for i in range(sheet_count):
        # Current sheet
        sheet = workbook.sheet_by_index(i)

        # Row and column counts of the sheet
        row_count = sheet.nrows
        col_count = sheet.ncols
        # Existing data
        data = []
        for row in range(row_count):
            row_data = []
            for col in range(col_count):
                row_data.append(sheet.cell_value(row, col))
            data.append(row_data)

        # Create the corresponding sheet in the new xlwt workbook
        new_sheet = new_workbook.add_sheet(sheet_names[i])

        # Copy the existing data into the new sheet
        for row in range(row_count):
            for col in range(col_count):
                new_sheet.write(row, col, data[row][col])

        if i == 0:
            # Append the newly fetched row to the first sheet
            for col in range(col_count):
                new_sheet.write(row_count, col, append_rows[col])

    # Save the new xls file
    new_workbook.save("定性模型数据项12-11.xlsx")

    update_e_value('定性模型数据项12-11.xlsx', 8, 1000)

    df = pd.read_excel('定性模型数据项12-11.xlsx')

    df = df.ffill()
    df1 = df[-2:].reset_index()
    '''
    # if df1.loc[1,'70号沥青开工率'] > 0.3:
    Changed on 2025-01-08:
    a post-mortem review found that from July 2024 the operating-rate data
    changed from 0.28 to 28, so the rule below is used instead.
    '''
    if df1.loc[1, '70号沥青开工率'] > 30:
        a = (df1.loc[1, '70号沥青开工率'] - 0.2) * 5 / 0.1
    else:
        a = 0
    b = df1.loc[1, '资金因素']
    if df1.loc[1, '昨日计划提货偏差'] > 0:
        c = df1.loc[1, '昨日计划提货偏差'] * 10 / 2000
    else:
        c = df1.loc[1, '昨日计划提货偏差'] * 10 / 3000
    d = df1.loc[1, '生产情况']
    if df1.loc[1, '基质沥青库存'] / 265007 > 0.8:
        e = (df1.loc[1, '基质沥青库存'] - df1.loc[0, '基质沥青库存']) * 10 / -5000
    else:
        e = 0
    # f = df1.loc[1,'下游客户价格预期']
    f = 1  # changed 2025-01-23: the price expectation is always taken as 1
    if abs(df1.loc[1, '即期成本'] - df1.loc[0, '即期成本']) >= 100:
        g = (df1.loc[1, '即期成本'] - df1.loc[0, '即期成本']) * 50 / 100
    else:
        g = 0
    h = df1.loc[1, '订单结构']
    x = round(0.08 * a + 0 * b + 0.15 * c + 0.08 * d + 0.03 * e + 0.08 * f + 0.4 * g + 0.18 * h
              + df1.loc[0, '京博指导价'], 2)

    login_res1 = requests.post(url=login_push_url, json=login_push_data, timeout=(3, 5))
    text1 = json.loads(login_res1.text)
    token_push = text1["data"]["accessToken"]

    data1 = {
        "funcModule": "数据表信息列表",
        "funcOperation": "新增",
        "data": [
            {
                "dataItemNo": "C01100036|Forecast_Price|DX|ACN",
                "dataDate": cur_time,
                "dataStatus": "add",
                "dataValue": x
            }
        ]
    }
    headers1 = {"Authorization": token_push}
    # res = requests.post(url=upload_url, headers=headers1, json=data1, timeout=(3, 5))


def start(now=None):
    workbook = xlrd.open_workbook(read_file_path_name)

    # Select the first sheet
    sheet = workbook.sheet_by_index(0)

    # Row count
    num_rows = sheet.nrows

    # The second row holds the data item codes
    row_data = sheet.row_values(1)
    one_cols = row_data

    login_res = requests.post(url=login_url, json=login_data, timeout=(3, 5))
    text = json.loads(login_res.text)
    if text["status"]:
        token = text["data"]["accessToken"]
    else:
        print("获取认证失败")
        token = None

    if now is None:
        now = datetime.now()
    year = now.year
    month = now.month
    day = now.day

    if month < 10:
        month = "0" + str(month)
    if day < 10:
        day = "0" + str(day)
    cur_time = str(year) + str(month) + str(day)
    cur_time2 = str(year) + "-" + str(month) + "-" + str(day)
    search_data = {
        "data": {
            "date": cur_time,
            "dataItemNoList": one_cols[1:]
        },
        "funcModule": "数据项",
        "funcOperation": "查询"
    }
    headers = {"Authorization": token}
    search_res = requests.post(url=search_url, headers=headers, json=search_data, timeout=(3, 5))
    search_value = json.loads(search_res.text)["data"]
    if search_value:
        datas = search_value
    else:
        datas = []  # guard against an empty response

    append_rows = [cur_time2]
    dataItemNo_dataValue = {}
    for data_value in datas:
        if "dataValue" not in data_value:
            print(data_value)
            dataItemNo_dataValue[data_value["dataItemNo"]] = ""
        else:
            dataItemNo_dataValue[data_value["dataItemNo"]] = data_value["dataValue"]
    for value in one_cols[1:]:
        if value in dataItemNo_dataValue:
            append_rows.append(dataItemNo_dataValue[value])
        else:
            append_rows.append("")

    workbook = xlrd.open_workbook('定性模型数据项12-11.xlsx')

    # Number of sheets
    sheet_count = len(workbook.sheet_names())

    # Names of all sheets
    sheet_names = workbook.sheet_names()

    new_workbook = xlwt.Workbook()
    for i in range(sheet_count):
        # Current sheet
        sheet = workbook.sheet_by_index(i)

        # Row and column counts of the sheet
        row_count = sheet.nrows
        col_count = sheet.ncols
        # Existing data
        data = []
        for row in range(row_count):
            row_data = []
            for col in range(col_count):
                row_data.append(sheet.cell_value(row, col))
            data.append(row_data)

        # Create the corresponding sheet in the new xlwt workbook
        new_sheet = new_workbook.add_sheet(sheet_names[i])

        # Copy the existing data into the new sheet
        for row in range(row_count):
            for col in range(col_count):
                new_sheet.write(row, col, data[row][col])

        if i == 0:
            # Append the newly fetched row to the first sheet
            for col in range(col_count):
                new_sheet.write(row_count, col, append_rows[col])

    # Save the new xls file
    new_workbook.save("定性模型数据项12-11.xlsx")

    update_e_value('定性模型数据项12-11.xlsx', 8, 1000)

    df = pd.read_excel('定性模型数据项12-11.xlsx')
    df = df.ffill()
    df1 = df[-2:].reset_index()
    print(df1)
    # if df1.loc[1,'70号沥青开工率'] > 0.3:  -- changed in the 2025-01-09 release
    if df1.loc[1, '70号沥青开工率'] / 100 > 0.3:
        a = (df1.loc[1, '70号沥青开工率'] / 100 - 0.2) * 5 / 0.1
    else:
        a = 0
    b = df1.loc[1, '资金因素']
    if df1.loc[1, '昨日计划提货偏差'] > 0:
        c = df1.loc[1, '昨日计划提货偏差'] * 10 / 2000
    else:
        c = df1.loc[1, '昨日计划提货偏差'] * 10 / 3000
    d = df1.loc[1, '生产情况']
    if df1.loc[1, '基质沥青库存'] / 265007 > 0.8:
        e = (df1.loc[1, '基质沥青库存'] - df1.loc[0, '基质沥青库存']) * 10 / -5000
    else:
        e = 0
    # f = df1.loc[1,'下游客户价格预期']
    f = 1  # changed 2025-01-23: the price expectation is always taken as 1
    if abs(df1.loc[1, '即期成本'] - df1.loc[0, '即期成本']) >= 100:
        g = (df1.loc[1, '即期成本'] - df1.loc[0, '即期成本']) * 50 / 100
    else:
        g = 0
    h = df1.loc[1, '订单结构']
    x = round(0.08 * a + 0 * b + 0.15 * c + 0.08 * d + 0.03 * e + 0.08 * f + 0.4 * g + 0.18 * h
              + df1.loc[0, '京博指导价'], 2)

    # Pushing the result is disabled in this variant:
    # login_res1 = requests.post(url=login_url, json=login_data, timeout=(3, 30))
    # text1 = json.loads(login_res1.text)
    # token_push = text1["data"]["accessToken"]
    #
    # data1 = {
    #     "funcModule": "数据表信息列表",
    #     "funcOperation": "新增",
    #     "data": [
    #         {"dataItemNo": "C01100036|Forecast_Price|DX|ACN",
    #          "dataDate": cur_time,
    #          "dataStatus": "add",
    #          "dataValue": x
    #          }
    #     ]
    # }
    # headers1 = {"Authorization": token_push}
    # res = requests.post(url=upload_url, headers=headers1, json=data1, timeout=(3, 5))


def start_test():
    workbook = xlrd.open_workbook(read_file_path_name)

    # Select the first sheet
    sheet = workbook.sheet_by_index(0)

    # Row count
    num_rows = sheet.nrows

    # The second row holds the data item codes
    row_data = sheet.row_values(1)
    one_cols = row_data

    login_res = requests.post(url=login_url, json=login_data, timeout=(3, 5))
    text = json.loads(login_res.text)
    if text["status"]:
        token = text["data"]["accessToken"]
    else:
        print("获取认证失败")
        token = None

    now = datetime.now()
    year = now.year
    month = now.month
    day = now.day

    if month < 10:
        month = "0" + str(month)
    if day < 10:
        day = "0" + str(day)
    cur_time = str(year) + str(month) + str(day)
    cur_time2 = str(year) + "-" + str(month) + "-" + str(day)
    search_data = {
        "data": {
            "date": cur_time,
            "dataItemNoList": one_cols[1:]
        },
        "funcModule": "数据项",
        "funcOperation": "查询"
    }
    headers = {"Authorization": token}
    search_res = requests.post(url=search_url, headers=headers, json=search_data, timeout=(3, 5))
    search_value = json.loads(search_res.text)["data"]
    if search_value:
        datas = search_value
    else:
        datas = []  # guard against an empty response

    append_rows = [cur_time2]
    dataItemNo_dataValue = {}
    for data_value in datas:
        if "dataValue" not in data_value:
            print(data_value)
            dataItemNo_dataValue[data_value["dataItemNo"]] = ""
        else:
            dataItemNo_dataValue[data_value["dataItemNo"]] = data_value["dataValue"]
    for value in one_cols[1:]:
        if value in dataItemNo_dataValue:
            append_rows.append(dataItemNo_dataValue[value])
        else:
            append_rows.append("")

    workbook = xlrd.open_workbook('定性模型数据项12-11.xlsx')

    # Number of sheets
    sheet_count = len(workbook.sheet_names())

    # Names of all sheets
    sheet_names = workbook.sheet_names()

    new_workbook = xlwt.Workbook()
    for i in range(sheet_count):
        # Current sheet
        sheet = workbook.sheet_by_index(i)

        # Row and column counts of the sheet
        row_count = sheet.nrows
        col_count = sheet.ncols
        # Existing data
        data = []
        for row in range(row_count):
            row_data = []
            for col in range(col_count):
                row_data.append(sheet.cell_value(row, col))
            data.append(row_data)

        # Create the corresponding sheet in the new xlwt workbook
        new_sheet = new_workbook.add_sheet(sheet_names[i])

        # Copy the existing data into the new sheet
        for row in range(row_count):
            for col in range(col_count):
                new_sheet.write(row, col, data[row][col])

        if i == 0:
            # Append the newly fetched row to the first sheet
            for col in range(col_count):
                new_sheet.write(row_count, col, append_rows[col])

    # Save the new xls file
    new_workbook.save("定性模型数据项12-11.xlsx")

    update_e_value('定性模型数据项12-11.xlsx', 8, 1000)

    df = pd.read_excel('定性模型数据项12-11.xlsx')
    df = df.ffill()
    df1 = df[-2:].reset_index()
    # if df1.loc[1,'70号沥青开工率'] > 0.3:  -- changed in the 2025-01-09 release
    if df1.loc[1, '70号沥青开工率'] / 100 > 0.3:
        a = (df1.loc[1, '70号沥青开工率'] / 100 - 0.2) * 5 / 0.1
    else:
        a = 0
    b = df1.loc[1, '资金因素']
    if df1.loc[1, '昨日计划提货偏差'] > 0:
        c = df1.loc[1, '昨日计划提货偏差'] * 10 / 2000
    else:
        c = df1.loc[1, '昨日计划提货偏差'] * 10 / 3000
    d = df1.loc[1, '生产情况']
    if df1.loc[1, '基质沥青库存'] / 265007 > 0.8:
        e = (df1.loc[1, '基质沥青库存'] - df1.loc[0, '基质沥青库存']) * 10 / -5000
    else:
        e = 0
    # f = df1.loc[1,'下游客户价格预期']
    f = 1  # changed 2025-01-23: the price expectation is always taken as 1
    if abs(df1.loc[1, '即期成本'] - df1.loc[0, '即期成本']) >= 100:
        g = (df1.loc[1, '即期成本'] - df1.loc[0, '即期成本']) * 50 / 100
    else:
        g = 0
    h = df1.loc[1, '订单结构']
    x = round(0.08 * a + 0 * b + 0.15 * c + 0.08 * d + 0.03 * e + 0.08 * f + 0.4 * g + 0.18 * h
              + df1.loc[0, '京博指导价'], 2)

    # Pushing the result is disabled in this variant:
    # login_res1 = requests.post(url=login_url, json=login_data, timeout=(3, 30))
    # text1 = json.loads(login_res1.text)
    # token_push = text1["data"]["accessToken"]
    #
    # data1 = {
    #     "funcModule": "数据表信息列表",
    #     "funcOperation": "新增",
    #     "data": [
    #         {"dataItemNo": "C01100036|Forecast_Price|DX|ACN",
    #          "dataDate": cur_time,
    #          "dataStatus": "add",
    #          "dataValue": x
    #          }
    #     ]
    # }
    # headers1 = {"Authorization": token_push}
    # res = requests.post(url=upload_url, headers=headers1, json=data1, timeout=(3, 5))


def start_1():
    workbook = xlrd.open_workbook(read_file_path_name)

    # Select the first sheet
    sheet = workbook.sheet_by_index(0)

    # Row count
    num_rows = sheet.nrows

    # The second row holds the data item codes
    row_data = sheet.row_values(1)
    one_cols = row_data

    login_res = requests.post(url=login_url, json=login_data, timeout=(3, 5))
    text = json.loads(login_res.text)
    if text["status"]:
        token = text["data"]["accessToken"]
    else:
        print("获取认证失败")
        token = None

    now = datetime.now() - timedelta(days=1)
    year = now.year
    month = now.month
    day = now.day

    if month < 10:
        month = "0" + str(month)
    if day < 10:
        day = "0" + str(day)
    cur_time = str(year) + str(month) + str(day)
    cur_time2 = str(year) + "-" + str(month) + "-" + str(day)
    search_data = {
        "data": {
            "date": cur_time,
            "dataItemNoList": one_cols[1:]
        },
        "funcModule": "数据项",
        "funcOperation": "查询"
    }
    headers = {"Authorization": token}
    search_res = requests.post(url=search_url, headers=headers, json=search_data, timeout=(3, 5))
    search_value = json.loads(search_res.text)["data"]
    if search_value:
        datas = search_value
    else:
        datas = []  # guard against an empty response

    append_rows = [cur_time2]
    dataItemNo_dataValue = {}
    for data_value in datas:
        if "dataValue" not in data_value:
            print(data_value)
            dataItemNo_dataValue[data_value["dataItemNo"]] = ""
        else:
            dataItemNo_dataValue[data_value["dataItemNo"]] = data_value["dataValue"]
    for value in one_cols[1:]:
        if value in dataItemNo_dataValue:
            append_rows.append(dataItemNo_dataValue[value])
        else:
            append_rows.append("")

    workbook = xlrd.open_workbook('定性模型数据项12-11.xlsx')

    # Number of sheets
    sheet_count = len(workbook.sheet_names())

    # Names of all sheets
    sheet_names = workbook.sheet_names()

    new_workbook = xlwt.Workbook()
    for i in range(sheet_count):
        # Current sheet
        sheet = workbook.sheet_by_index(i)

        # Row and column counts of the sheet
        # (skip the last existing row so the freshly fetched row replaces it)
        row_count = sheet.nrows - 1
        col_count = sheet.ncols
        # Existing data
        data = []
        for row in range(row_count):
            row_data = []
            for col in range(col_count):
                row_data.append(sheet.cell_value(row, col))
            data.append(row_data)

        # Create the corresponding sheet in the new xlwt workbook
        new_sheet = new_workbook.add_sheet(sheet_names[i])

        # Copy the existing data into the new sheet
        for row in range(row_count):
            for col in range(col_count):
                new_sheet.write(row, col, data[row][col])

        if i == 0:
            # Append the newly fetched row to the first sheet
            for col in range(col_count):
                new_sheet.write(row_count, col, append_rows[col])

    # Save the new xls file
    new_workbook.save("定性模型数据项12-11.xlsx")


def get_queryDataListItemNos_value(token, url, dataItemNoList, dateStart, dateEnd):

    search_data = {
        "funcModule": "数据项",
        "funcOperation": "查询",
        "data": {
            "dateStart": dateStart,
            "dateEnd": dateEnd,
            "dataItemNoList": dataItemNoList  # data item codes, e.g. the Brent low and high prices
        }
    }

    headers = {"Authorization": token}
    search_res = requests.post(url=url, headers=headers, json=search_data, timeout=(3, 5))
    search_value = json.loads(search_res.text)["data"]
    if search_value:
        return search_value
    else:
        return None


def save_queryDataListItemNos_xls(data_df, dataItemNoList):

    current_year_month = datetime.now().strftime('%Y-%m')
    grouped = data_df.groupby("dataDate")

    df_old = pd.read_excel('定性模型数据项12-11.xlsx')
    # The first data row maps data item codes back to column names
    df_old0 = df_old[:1]
    result_dict = {df_old0.iloc[0][col]: col for col in df_old0.columns}
    df_old1 = df_old[1:].copy()

    df_old1["日期"] = pd.to_datetime(df_old1["日期"])
    # Drop the rows whose date falls in the current month
    df_old1 = df_old1[~df_old1["日期"].dt.strftime('%Y-%m').eq(current_year_month)]
    df_old1["日期"] = df_old1["日期"].dt.strftime('%Y-%m-%d')

    # Rebuild the current month's rows from the freshly queried values
    list_data = []
    for date, group in grouped:
        dict_data = {"日期": date}
        for index, row in group.iterrows():
            dict_data[result_dict[row['dataItemNo']]] = row['dataValue']
        list_data.append(dict_data)

    df_current_year_month = pd.DataFrame(list_data)

    df_merged = pd.concat([df_old0, df_old1, df_current_year_month], ignore_index=True)

    df_merged.to_excel('定性模型数据项12-11.xlsx', index=False)


def queryDataListItemNos(date=None, token=None):
    df = pd.read_excel('定性模型数据项12-11.xlsx')
    dataItemNoList = df.iloc[0].tolist()[1:]

    if token is None:
        token = getLogToken()
    if token is None:
        print("获取token失败")
        return

    # Current date (or the date passed in)
    if date is None:
        date = datetime.now()
    current_date = date

    # First day of the current month
    first_day_of_month = current_date.replace(day=1)

    # Format as YYYYMMDD
    dateEnd = current_date.strftime('%Y%m%d')
    dateStart = first_day_of_month.strftime('%Y%m%d')

    search_value = get_queryDataListItemNos_value(token, queryDataListItemNos_url, dataItemNoList, dateStart, dateEnd)
    # print("search_value", search_value)

    data_df = pd.DataFrame(search_value)

    data_df["dataDate"] = pd.to_datetime(data_df["dataDate"])
    data_df["dataDate"] = data_df["dataDate"].dt.strftime('%Y-%m-%d')
    save_queryDataListItemNos_xls(data_df, dataItemNoList)


def main(start_date=None, token=None, token_push=None):
    if start_date is None:
        start_date = datetime.now()
    if token is None:
        token = get_head_auth()
    if token_push is None:
        token_push = get_head_push_auth()
    date = start_date.strftime('%Y%m%d')
    print(date)
    updateExcelData(date, token)
    queryDataListItemNos(token=token)
    update_e_value('定性模型数据项12-11.xlsx', 8, 1000)
    x = qualitativeModel()
    print('**************************************************预测结果:', x)
    cur_time, cur_time2 = getNow(date)
    pushData(cur_time, x, token_push)  # push with the token obtained from the push login

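# Assumed usage example: main(datetime(2025, 1, 8)) pulls the data items for
# 2025-01-08, refreshes the workbook, and pushes that day's forecast (the
# monthly refresh in queryDataListItemNos still uses today's date).
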
if __name__ == "__main__":
    print("运行中...")
    main()